forked from TrueCloudLab/distribution
Compare commits
No commits in common. "empty" and "tcl/master" have entirely different histories.
empty...tcl/master
587 changed files with 77034 additions and 2 deletions
1 .dockerignore Normal file
@@ -0,0 +1 @@
bin/
23 .forgejo/workflows/builds.yml Normal file
@@ -0,0 +1,23 @@
on: [pull_request]

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.21', '1.22' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make

      - name: Check dirty suffix
        run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
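The dirty-suffix check fails the build when the checkout would yield a version string such as v3.0.0-12-gabcdef-dirty. A minimal local approximation, assuming `make version` ultimately wraps `git describe --tags --dirty` (the usual convention; the actual Makefile recipe is not shown in this diff):

  # Fail if the working tree would produce a "-dirty" version string (sketch).
  version=$(git describe --tags --dirty 2>/dev/null || echo unknown)
  if [[ $version == *dirty* ]]; then
    echo "Version has dirty suffix" && exit 1
  fi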
20 .forgejo/workflows/dco.yml Normal file
@@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.22'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
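The dco-go checker verifies that every commit between the PR base and head carries a Signed-off-by trailer. Locally, the trailer is added with git's -s flag (the commit message below is only an example):

  # Appends "Signed-off-by: Your Name <you@example.com>" from your git config.
  git commit -s -m "example: describe the change"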
21 .forgejo/workflows/vulncheck.yml Normal file
@@ -0,0 +1,21 @@
on: [pull_request]

jobs:
  vulncheck:
    name: Vulncheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.22.11'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Run govulncheck
        run: govulncheck ./...
48 .github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file
@@ -0,0 +1,48 @@
name: Bug report
description: Create a report to help us improve
labels:
  - kind/bug
body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to report a bug!
        If this is a security issue please report it to the [Distributions Security Mailing List](mailto:cncf-distribution-security@lists.cncf.io).
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Please give a clear and concise description of the bug
    validations:
      required: true
  - type: textarea
    id: repro
    attributes:
      label: Reproduce
      description: Steps to reproduce the bug
      placeholder: |
        1. start registry version X ...
        2. `docker push image:tag` ...
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: Expected behavior
      description: What is the expected behavior?
      placeholder: |
        E.g. "registry returns an incorrect API error"
  - type: textarea
    id: version
    attributes:
      label: registry version
      description: Output of `registry --version`. Alternatively tell us the docker image tag.
    validations:
      required: true
  - type: textarea
    id: additional
    attributes:
      label: Additional Info
      description: Additional info you want to provide such as logs, system info, environment, etc.
    validations:
      required: false
8 .github/ISSUE_TEMPLATE/config.yml vendored Normal file
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Security and Vulnerabilities
    url: https://github.com/distribution/distribution/blob/main/SECURITY.md
    about: Please report any security issues or vulnerabilities responsibly to the distribution maintainers team. Please do not use the public issue tracker.
  - name: Questions and Discussions
    url: https://github.com/distribution/distribution/discussions/new/choose
    about: Use Github Discussions to ask questions and/or open discussion topics.
12 .github/ISSUE_TEMPLATE/feature_request.yml vendored Normal file
@@ -0,0 +1,12 @@
name: Feature request
description: Missing functionality? Come tell us about it!
labels:
  - kind/feature
body:
  - type: textarea
    id: description
    attributes:
      label: Description
      description: What is the feature you want to see?
    validations:
      required: true
8 .github/dependabot.yml vendored Normal file
@@ -0,0 +1,8 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
    labels:
      - "dependencies"
61 .github/labeler.yml vendored Normal file
@@ -0,0 +1,61 @@
area/api:
  - changed-files:
    - any-glob-to-any-file:
      - registry/api/**
      - registry/handlers/**
area/auth:
  - changed-files:
    - any-glob-to-any-file:
      - registry/auth/**
area/build:
  - changed-files:
    - any-glob-to-any-file:
      - Makefile
      - Dockerfile
      - docker-bake.hcl
      - dockerfiles/**
area/cache:
  - changed-files:
    - any-glob-to-any-file:
      - registry/storage/cache/**
area/ci:
  - changed-files:
    - any-glob-to-any-file:
      - .github/**
      - tests/**
      - testutil/**
area/config:
  - changed-files:
    - any-glob-to-any-file:
      - configuration/**
area/docs:
  - changed-files:
    - any-glob-to-any-file:
      - README.md
      - docs/**/*.md
area/proxy:
  - changed-files:
    - any-glob-to-any-file:
      - registry/proxy/**
area/storage:
  - changed-files:
    - any-glob-to-any-file:
      - registry/storage/**
area/storage/azure:
  - changed-files:
    - any-glob-to-any-file:
      - registry/storage/driver/azure/**
area/storage/gcs:
  - changed-files:
    - any-glob-to-any-file:
      - registry/storage/driver/gcs/**
area/storage/s3:
  - changed-files:
    - any-glob-to-any-file:
      - registry/storage/driver/s3-aws/**
dependencies:
  - changed-files:
    - any-glob-to-any-file:
      - vendor/**
      - go.mod
      - go.sum
161 .github/workflows/build.yml vendored Normal file
@@ -0,0 +1,161 @@
name: build

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
      - 'release/*'
    tags:
      - 'v*'
  pull_request:

env:
  DOCKERHUB_SLUG: distribution/distribution
  GHCR_SLUG: ghcr.io/${{ github.repository }}

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go:
          - 1.21.8
          - 1.22.1
        target:
          - test-coverage
          - test-cloud-storage
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      -
        name: Test
        run: |
          make ${{ matrix.target }}
      -
        name: Codecov
        uses: codecov/codecov-action@v4
        with:
          directory: ./

  build:
    permissions:
      contents: write # to create GitHub release (softprops/action-gh-release)
      packages: write # so we can push the image to GHCR

    runs-on: ubuntu-latest
    needs:
      - test
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ${{ env.DOCKERHUB_SLUG }}
            ${{ env.GHCR_SLUG }}
          ### versioning strategy
          ### push semver tag v3.2.1 on main (default branch)
          # distribution/distribution:3.2.1
          # distribution/distribution:3.2
          # distribution/distribution:3
          # distribution/distribution:latest
          ### push semver prelease tag v3.0.0-beta.1 on main (default branch)
          # distribution/distribution:3.0.0-beta.1
          ### push on main
          # distribution/distribution:edge
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=ref,event=pr
            type=edge
          labels: |
            org.opencontainers.image.title=Distribution
            org.opencontainers.image.description=The toolkit to pack, ship, store, and distribute container content
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Log in to GitHub Container registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      -
        name: Build artifacts
        uses: docker/bake-action@v4
        with:
          targets: artifact-all
      -
        name: Rename provenance
        run: |
          for pdir in ./bin/*/; do
            (
              cd "$pdir"
              binname=$(find . -name '*.tar.gz')
              filename=$(basename "${binname%.tar.gz}")
              mv "provenance.json" "${filename}.provenance.json"
            )
          done
      -
        name: Move and list artifacts
        run: |
          mv ./bin/**/* ./bin/
          tree -nh ./bin
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v4.3.0
        with:
          name: registry
          path: ./bin/*
          if-no-files-found: error
      -
        name: Build image
        uses: docker/bake-action@v4
        with:
          files: |
            ./docker-bake.hcl
            ${{ steps.meta.outputs.bake-file }}
          targets: image-all
          push: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
      -
        name: GitHub Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          draft: true
          files: |
            bin/*.tar.gz
            bin/*.provenance.json
            bin/*.sha256
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
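The artifact and image steps both drive targets defined in docker-bake.hcl. Assuming Buildx is installed, the same targets can be reproduced locally; the target names below are taken from the workflow above, not verified against the bake file itself:

  # Cross-build release artifacts into ./bin/ (same target CI uses).
  docker buildx bake -f docker-bake.hcl artifact-all
  # Build the multi-platform image without pushing.
  docker buildx bake -f docker-bake.hcl image-all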
55 .github/workflows/codeql-analysis.yml vendored Normal file
@@ -0,0 +1,55 @@
name: CodeQL

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  schedule:
    - cron: '0 12 * * 6'
  push:
    branches:
      - 'main'
      - 'release/*'
    tags:
      - 'v*'
  pull_request:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  analyze:
    permissions:
      contents: read # to fetch code (actions/checkout)
      security-events: write # to upload SARIF results (github/codeql-action/analyze)

    name: Analyze
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        language:
          - go
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 2
      -
        name: Checkout HEAD on PR
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          git checkout HEAD^2
      -
        name: Initialize CodeQL
        uses: github/codeql-action/init@v3.22.12
        with:
          languages: ${{ matrix.language }}
      -
        name: Autobuild
        uses: github/codeql-action/autobuild@v3.22.12
      -
        name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3.22.12
56 .github/workflows/conformance.yml vendored Normal file
@@ -0,0 +1,56 @@
name: conformance

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request:
  push:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  run-conformance-test:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Build image
        uses: docker/bake-action@v4
        with:
          targets: image-local
      -
        name: Start distribution server
        run: |
          IP=`hostname -I | awk '{print $1}'`
          echo "IP=$IP" >> $GITHUB_ENV
          echo "OCI_ROOT_URL=http://$IP:5000" >> $GITHUB_ENV
          DISTRIBUTION_REF="registry:local"
          docker run --rm -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true -idt "registry:local"
      -
        name: Run OCI Distribution Spec conformance tests
        uses: opencontainers/distribution-spec@v1.0.1
        env:
          OCI_ROOT_URL: ${{ env.OCI_ROOT_URL }}
          OCI_NAMESPACE: oci-conformance/distribution-test
          OCI_TEST_PULL: 1
          OCI_TEST_PUSH: 1
          OCI_TEST_CONTENT_DISCOVERY: 1
          OCI_TEST_CONTENT_MANAGEMENT: 1
          OCI_HIDE_SKIPPED_WORKFLOWS: 1
      -
        name: Move test results
        run: mkdir -p .out/ && mv {report.html,junit.xml} .out/
      -
        name: Upload test results
        uses: actions/upload-artifact@v4.3.0
        with:
          name: oci-test-results-${{ github.sha }}
          path: .out/
          if-no-files-found: error
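The conformance job can be approximated locally by baking the image-local target and pointing OCI_ROOT_URL at the running container; probing the /v2/ base endpoint is a quick sanity check that the registry is answering (a sketch, same assumptions as the bake example above):

  docker buildx bake -f docker-bake.hcl image-local
  docker run --rm -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true -idt registry:local
  # The OCI base endpoint returns HTTP 200 once the registry is up.
  curl -sf http://127.0.0.1:5000/v2/ && echo "registry is up"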
35 .github/workflows/dockerhub-readme.yml vendored Normal file
@@ -0,0 +1,35 @@
name: dockerhub-readme

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
    paths:
      - '.github/workflows/dockerhub-readme.yml'
      - 'docs/dockerhub.md'

env:
  DOCKERHUB_SLUG: distribution/distribution

permissions:
  contents: read

jobs:
  update:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Update Docker Hub README
        uses: peter-evans/dockerhub-description@v4
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
          repository: ${{ env.DOCKERHUB_SLUG }}
          readme-filepath: ./docs/dockerhub.md
72 .github/workflows/docs.yml vendored Normal file
@@ -0,0 +1,72 @@
name: docs

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - main
    paths:
      - .github/workflows/docs.yml
      - dockerfiles/docs.Dockerfile
      - docs/**
  workflow_dispatch:

jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    # Build the site and upload artifacts using actions/upload-pages-artifact
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build docs
        uses: docker/bake-action@v4
        with:
          files: |
            docker-bake.hcl
          targets: docs-export
          provenance: false
          set: |
            *.cache-from=type=gha,scope=docs
            *.cache-to=type=gha,scope=docs,mode=max
      - name: Fix permissions
        run: |
          chmod -c -R +rX "./build/docs" | while read line; do
            echo "::warning title=Invalid file permissions automatically fixed::$line"
          done
      - name: Upload Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: ./build/docs

  # Deploy job
  deploy:
    # Add a dependency to the build job
    needs: build

    # Grant GITHUB_TOKEN the permissions required to make a Pages deployment
    permissions:
      pages: write # to deploy to Pages
      id-token: write # to verify the deployment originates from an appropriate source

    # Deploy to the github-pages environment
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}

    # Specify runner + deployment step
    runs-on: ubuntu-latest
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4 # or the latest "vX.X.X" version tag for this action
56 .github/workflows/e2e.yml vendored Normal file
@@ -0,0 +1,56 @@
name: e2e

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
      - 'release/*'
  pull_request:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  run-e2e-test:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Build image
        uses: docker/bake-action@v4
        with:
          targets: image-local
      -
        name: Start distribution server
        run: |
          docker run --rm -p 5000:5000 -p 5001:5001 -idt "registry:local"
      -
        name: Tests
        run: |
          bash ./tests/push.sh 127.0.0.0

  run-e2e-test-s3-storage:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      -
        name: Start E2E environment
        run: |
          make start-e2e-s3-env

      - name: Tests
        run: |
          bash ./tests/push.sh 127.0.0.0
          make stop-e2e-s3-env
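The S3 job wraps its environment in Makefile targets, so a local run follows the same sequence (a sketch; the 127.0.0.0 argument is copied verbatim from the workflow, and the push script is assumed to treat it as the local registry address):

  make start-e2e-s3-env
  bash ./tests/push.sh 127.0.0.0
  make stop-e2e-s3-env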
25 .github/workflows/fossa.yml vendored Normal file
@@ -0,0 +1,25 @@
name: FOSSA License Scanning

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  - pull_request
  - push

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  scan-license:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Run FOSSA scan and upload build data
        uses: fossa-contrib/fossa-action@v3
        with:
          fossa-api-key: cac3dc8d4f2ba86142f6c0f2199a160f
19 .github/workflows/label.yaml vendored Normal file
@@ -0,0 +1,19 @@
name: Pull Request Labeler

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request_target:

jobs:
  labeler:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/labeler@v5
        with:
          dot: true
60 .github/workflows/scorecards.yml vendored Normal file
@@ -0,0 +1,60 @@
name: Scorecards supply-chain security
on:
  # Only the default branch is supported.
  branch_protection_rule:
  schedule:
    - cron: '26 0 * * 0'
  push:
    branches: [ "main" ]

# Declare default permissions as read only.
permissions: read-all

jobs:
  analysis:
    name: Scorecards analysis
    runs-on: ubuntu-latest
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Used to receive a badge.
      id-token: write

    steps:
      - name: "Checkout code"
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1
        with:
          results_file: results.sarif
          results_format: sarif
          # (Optional) Read-only PAT token. Uncomment the `repo_token` line below if:
          # - you want to enable the Branch-Protection check on a *public* repository, or
          # - you are installing Scorecards on a *private* repository
          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
          # repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}

          # Publish the results for public repositories to enable scorecard badges. For more details, see
          # https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories, `publish_results` will automatically be set to `false`, regardless
          # of the value entered here.
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # tag=v4.3.0
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@1500a131381b66de0c52ac28abb13cd79f4b7ecc # tag=v2.22.12
        with:
          sarif_file: results.sarif
38 .github/workflows/validate.yml vendored Normal file
@@ -0,0 +1,38 @@
name: validate

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  push:
    branches:
      - 'main'
      - 'release/*'
    tags:
      - 'v*'
  pull_request:

permissions:
  contents: read # to fetch code (actions/checkout)

jobs:
  validate:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target:
          - lint
          - validate-vendor
          - validate-git
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Run
        run: |
          make ${{ matrix.target }}
        env:
          COMMIT_RANGE: ${{ format('{0}..{1}', github.sha, 'HEAD') }}
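Each matrix entry is just a Makefile target, so the whole validation pass reduces to one local command; COMMIT_RANGE presumably only matters for validate-git, which checks the commits in the given range (the range below is illustrative):

  COMMIT_RANGE=main..HEAD make lint validate-vendor validate-git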
45 .gitignore vendored Normal file
@@ -0,0 +1,45 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# never checkin from the bin file (for now)
bin/*

# Test key files
*.pem

# Cover profiles
*.out

# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace
.idea/*

tests/miniodata

# Docs
**/.hugo_build.lock
docs/resources
docs/public
28 .golangci.yml Normal file
@@ -0,0 +1,28 @@
linters:
  enable:
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - revive
    - ineffassign
    - govet
    - unused
    - misspell
    - bodyclose
    - prealloc
    - errcheck
    - tparallel

linters-settings:
  revive:
    rules:
      # TODO(thaJeztah): temporarily disabled the "unused-parameter" check.
      # It produces many warnings, and some of those may need to be looked at.
      - name: unused-parameter
        disabled: true

issues:
  deadline: 2m
  exclude-dirs:
    - vendor
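golangci-lint reads this file automatically from the repository root, so the CI lint target presumably reduces to a plain invocation:

  # Runs the linter set enabled above across all packages.
  golangci-lint run ./...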
194 .mailmap Normal file
@@ -0,0 +1,194 @@
Aaron Lehmann <alehmann@netflix.com>
Aaron Lehmann <alehmann@netflix.com> <aaron.lehmann@docker.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexander Morozov <lk4d4math@gmail.com> <lk4d4@docker.com>
Anders Ingemann <aim@orbit.online>
Andrew Meredith <andymeredith@gmail.com>
Andrew Meredith <andymeredith@gmail.com> <kendru@users.noreply.github.com>
Andrey Smirnov <andrey.smirnov@siderolabs.com>
Andrii Soldatenko <andrii.soldatenko@gmail.com>
Andrii Soldatenko <andrii.soldatenko@gmail.com> <andrii.soldatenko@dynatrace.com>
Anthony Ramahay <thewolt@gmail.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
Austin Vazquez <macedonv@amazon.com>
Benjamin Schanzel <benjamin.schanzel@bmw.de>
Brian Bland <brian.t.bland@gmail.com>
Brian Bland <brian.t.bland@gmail.com> <brian.bland@docker.com>
Brian Bland <brian.t.bland@gmail.com> <r4nd0m1n4t0r@gmail.com>
Chad Faragher <wyckster@hotmail.com>
Cory Snider <csnider@mirantis.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Cristian Staretu <cristian.staretu@gmail.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
Daniel Nephin <dnephin@gmail.com>
Daniel Nephin <dnephin@gmail.com> <dnephin@docker.com>
David Karlsson <david.karlsson@docker.com>
David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
David Wu <dwu7401@gmail.com>
David Wu <dwu7401@gmail.com> <david.wu@docker.com>
Derek McGowan <derek@mcg.dev>
Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
Dimitar Kostadinov <dimitar.kostadinov@sap.com>
Doug Davis <dug@us.ibm.com>
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
Emmanuel Ferdman <emmanuelferdman@gmail.com>
Eng Zer Jun <engzerjun@gmail.com>
Eric Yang <windfarer@gmail.com>
Eric Yang <windfarer@gmail.com> <Windfarer@users.noreply.github.com>
Eric Yang <windfarer@gmail.com> <qizhao.yang@daocloud.io>
Erica Windisch <erica@windisch.us>
Erica Windisch <erica@windisch.us> <eric@windisch.us>
Guillaume J. Charmes <charmes.guillaume@gmail.com>
Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume.charmes@dotcloud.com>
Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume@charmes.net>
Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume@docker.com>
Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume@dotcloud.com>
Hayley Swimelar <hswimelar@gmail.com>
Ismail Alidzhikov <i.alidjikov@gmail.com>
Jaime Martinez <jmartinez@gitlab.com>
James Hewitt <james.hewitt@uk.ibm.com>
Jessica Frazelle <jess@oxide.computer>
Jessica Frazelle <jess@oxide.computer> <acidburn@docker.com>
Jessica Frazelle <jess@oxide.computer> <acidburn@google.com>
Jessica Frazelle <jess@oxide.computer> <acidburn@microsoft.com>
Jessica Frazelle <jess@oxide.computer> <jess@docker.com>
Jessica Frazelle <jess@oxide.computer> <jess@mesosphere.com>
Jessica Frazelle <jess@oxide.computer> <jessfraz@google.com>
Jessica Frazelle <jess@oxide.computer> <jfrazelle@users.noreply.github.com>
Jessica Frazelle <jess@oxide.computer> <me@jessfraz.com>
Jessica Frazelle <jess@oxide.computer> <princess@docker.com>
Joao Fernandes <joaofnfernandes@gmail.com>
Joao Fernandes <joaofnfernandes@gmail.com> <joao.fernandes@docker.com>
João Pereira <484633+joaodrp@users.noreply.github.com>
Joffrey F <joffrey@docker.com>
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
Johan Euphrosine <proppy@google.com>
Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
John Howard <github@lowenna.com>
John Howard <github@lowenna.com> <jhoward@microsoft.com>
Josh Hawn <jlhawn@berkeley.edu>
Josh Hawn <jlhawn@berkeley.edu> <josh.hawn@docker.com>
Joyce Brum <joycebrumu.u@gmail.com>
Joyce Brum <joycebrumu.u@gmail.com> <joycebrum@google.com>
Justin Cormack <justin.cormack@docker.com>
Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
Kirat Singh <kirat.singh@gmail.com>
Kirat Singh <kirat.singh@gmail.com> <kirat.singh@beacon.io>
Kirat Singh <kirat.singh@gmail.com> <kirat.singh@wsq.io>
Kyle Squizzato <ksquizz@gmail.com>
Liang Zheng <zhengliang0901@gmail.com>
Luca Bruno <lucab@debian.org>
Luca Bruno <lucab@debian.org> <luca.bruno@coreos.com>
Mahmoud Kandil <47168819+MahmoudKKandil@users.noreply.github.com>
Manish Tomar <manish.tomar@docker.com>
Manish Tomar <manish.tomar@docker.com> <manishtomar@users.noreply.github.com>
Maria Bermudez <bermudez.mt@gmail.com>
Maria Bermudez <bermudez.mt@gmail.com> <bermudezmt@users.noreply.github.com>
Markus Thömmes <markusthoemmes@me.com>
Matt Linville <matt@linville.me>
Matt Linville <matt@linville.me> <misty@apache.org>
Matt Linville <matt@linville.me> <misty@docker.com>
Michael Crosby <crosbymichael@gmail.com>
Michael Crosby <crosbymichael@gmail.com> <crosby.michael@gmail.com>
Michael Crosby <crosbymichael@gmail.com> <michael@crosbymichael.com>
Michael Crosby <crosbymichael@gmail.com> <michael@docker.com>
Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
Michal Minar <miminar@redhat.com>
Michal Minar <miminar@redhat.com> Michal Minář <miminar@redhat.com>
Mike Brown <brownwm@us.ibm.com>
Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
Mikel Rychliski <mikel@mikelr.com>
Milos Gajdos <milosthegajdos@gmail.com>
Milos Gajdos <milosthegajdos@gmail.com> <1392526+milosgajdos@users.noreply.github.com>
Milos Gajdos <milosthegajdos@gmail.com> <milosgajdos83@gmail.com>
Nikita Tarasov <nikita@mygento.ru>
Nikita Tarasov <nikita@mygento.ru> <luckyraul@users.noreply.github.com>
Oleg Bulatov <oleg@bulatov.me>
Oleg Bulatov <oleg@bulatov.me> <obulatov@redhat.com>
Olivier Gambier <olivier@docker.com>
Olivier Gambier <olivier@docker.com> <dmp42@users.noreply.github.com>
Omer Cohen <git@omer.io>
Omer Cohen <git@omer.io> <git@omerc.net>
Paul Meyer <49727155+katexochen@users.noreply.github.com>
Per Lundberg <perlun@gmail.com>
Per Lundberg <perlun@gmail.com> <per.lundberg@ecraft.com>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Dave Hello <hsu@peterdavehello.org> <PeterDaveHello@users.noreply.github.com>
Phil Estes <estesp@gmail.com>
Phil Estes <estesp@gmail.com> <estesp@amazon.com>
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
Richard Scothern <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@gmail.com> <richard.scothern@docker.com>
Rober Morales-Chaparro <rober.morales@rstor.io>
Rober Morales-Chaparro <rober.morales@rstor.io> <rober@rstor.io>
Robin Ketelbuters <robin.ketelbuters@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com>
Sharif Nassar <sharif@mrwacky.com> <mrwacky42@users.noreply.github.com>
Solomon Hykes <solomon@dagger.io>
Solomon Hykes <solomon@dagger.io> <s@docker.com>
Solomon Hykes <solomon@dagger.io> <solomon.hykes@dotcloud.com>
Solomon Hykes <solomon@dagger.io> <solomon@docker.com>
Solomon Hykes <solomon@dagger.io> <solomon@dotcloud.com>
Stephen Day <stevvooe@gmail.com>
Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
Steven Kalt <SKalt@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
Sylvain DESGRAIS <sylvain.desgrais@gmail.com>
Tadeusz Dudkiewicz <tadeusz.dudkiewicz@rtbhouse.com>
Tibor Vass <teabee89@gmail.com>
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
Victor Vieux <victorvieux@gmail.com>
Victor Vieux <victorvieux@gmail.com> <dev@vvieux.com>
Victor Vieux <victorvieux@gmail.com> <victor.vieux@docker.com>
Victor Vieux <victorvieux@gmail.com> <victor.vieux@dotcloud.com>
Victor Vieux <victorvieux@gmail.com> <victor@docker.com>
Victor Vieux <victorvieux@gmail.com> <victor@dotcloud.com>
Victor Vieux <victorvieux@gmail.com> <victorvieux@gmail.com>
Victor Vieux <victorvieux@gmail.com> <vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Victoria Bialas <victoria.bialas@docker.com> <londoncalling@users.noreply.github.com>
Vincent Batts <vbatts@redhat.com>
Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
Vincent Demeester <vincent.demeester@docker.com>
Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
Vincent Giersch <vincent@giersch.fr>
Vincent Giersch <vincent@giersch.fr> <vincent.giersch@ovh.net>
Wang Yan <wangyan@vmware.com>
Wen-Quan Li <legendarilylwq@gmail.com>
Wen-Quan Li <legendarilylwq@gmail.com> <wenquan.li@hp.com>
Wen-Quan Li <legendarilylwq@gmail.com> <wenquan.li@hpe.com>
Yu Wang <yuwa@microsoft.com>
Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com>
baojiangnan <baojiangnan@meituan.com>
baojiangnan <baojiangnan@meituan.com> <baojn1998@163.com>
erezrokah <erezrokah@users.noreply.github.com>
goodactive <goodactive@qq.com>
gotgelf <gotgelf@gmail.com>
guoguangwu <guoguangwug@gmail.com>
harche <p.harshal@gmail.com>
harche <p.harshal@gmail.com> <harche@users.noreply.github.com>
icefed <zlwangel@gmail.com>
oliver-goetz <o.goetz@sap.com>
xiaoxiangxianzi <zhaoyizheng@outlook.com>
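git consults .mailmap when rendering shortlog, blame, and log output, folding the alias addresses on the right into the canonical identity on the left. The mapping can be checked directly; this example uses the Stephen Day entries above:

  git check-mailmap "Stephen Day <stephen.day@docker.com>"
  # -> Stephen Day <stevvooe@gmail.com>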
11 ADOPTERS.md Normal file
@@ -0,0 +1,11 @@
Docker Hub https://hub.docker.com/

GitLab Container Registry https://docs.gitlab.com/ee/user/packages/container_registry/

GitHub Container Registry https://docs.github.com/en/free-pro-team@latest/packages/guides/about-github-container-registry

Harbor, CNCF Graduated project https://goharbor.io/

VMware Harbor Registry https://docs.pivotal.io/partners/vmware-harbor/index.html

DigitalOcean Container Registry https://www.digitalocean.com/products/container-registry/
530 AUTHORS Normal file
@@ -0,0 +1,530 @@
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see dockerfiles/authors.Dockerfile.

a-palchikov <deemok@gmail.com>
Aaron Lehmann <alehmann@netflix.com>
Aaron Schlesinger <aschlesinger@deis.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Dobrawy <ad-m@users.noreply.github.com>
Adam Duke <adam.v.duke@gmail.com>
Adam Enger <adamenger@gmail.com>
Adam Kaplan <adam.kaplan@redhat.com>
Adam Wolfe Gordon <awg@digitalocean.com>
AdamKorcz <adam@adalogics.com>
Adrian Mouat <adrian.mouat@gmail.com>
Adrian Plata <adrian.plata@docker.com>
Adrien Duermael <adrien@duermael.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Aleksejs Sinicins <monder@monder.cc>
Alex <aleksandrosansan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
Alex Laties <agl@tumblr.com>
Alexander Larsson <alexl@redhat.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexey Gladkov <gladkov.alexey@gmail.com>
Alfonso Acosta <fons@syntacticsugar.consulting>
allencloud <allen.sun@daocloud.io>
Alvin Feng <alvin4feng@yahoo.com>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andreas Hassing <andreas@famhassing.dk>
Andrew Bulford <andrew.bulford@redmatter.com>
Andrew Hsu <andrewhsu@acm.org>
Andrew Lavery <laverya@umich.edu>
Andrew Leung <anwleung@gmail.com>
Andrew Lively <andrew.lively2@gmail.com>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrews Medina <andrewsmedina@gmail.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andrii Soldatenko <andrii.soldatenko@gmail.com>
Andy Goldstein <agoldste@redhat.com>
andyzhangx <xiazhang@microsoft.com>
Anian Z <ziegler@sicony.de>
Anil Belur <askb23@gmail.com>
Anis Elleuch <vadmeste@gmail.com>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anne Henmi <41210220+ahh-docker@users.noreply.github.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonio Ojea <antonio.ojea.garcia@gmail.com>
Anusha Ragunathan <anusha@docker.com>
Arien Holthuizen <aholthuizen@schubergphilis.com>
Arko Dasgupta <arkodg@users.noreply.github.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Arthur Gautier <baloo@gandi.net>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Aviral Takkar <aviral26@users.noreply.github.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
baojiangnan <baojiangnan@meituan.com>
Ben Bodenmiller <bbodenmiller@hotmail.com>
Ben De St Paer-Gotch <bende@outlook.com>
Ben Emamian <ben@ictace.com>
Ben Firshman <ben@firshman.co.uk>
Ben Kochie <superq@gmail.com>
Ben Manuel <ben.manuel@procore.com>
Bhavin Gandhi <bhavin192@users.noreply.github.com>
Bill <NonCreature0714@users.noreply.github.com>
bin liu <liubin0329@gmail.com>
Bouke van der Bijl <me@bou.ke>
Bracken Dawson <abdawson@gmail.com>
Brandon Mitchell <git@bmitch.net>
Brandon Philips <brandon@ifup.co>
Brett Higgins <brhiggins@arbor.net>
Brian Bland <brian.t.bland@gmail.com>
Brian Goff <cpuguy83@gmail.com>
burnettk <burnettk@gmail.com>
Caleb Spare <cespare@gmail.com>
Carson A <ca@carsonoid.net>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Faragher <wyckster@hotmail.com>
Chaos John <chaosjohn.yjh@icloud.com>
Charles Smith <charles.smith@docker.com>
Cheng Zheng <chengzheng.apply@gmail.com>
chlins <chenyuzh@vmware.com>
Chris Aniszczyk <caniszczyk@gmail.com>
Chris Dillon <squarism@gmail.com>
Chris K. Wong <chriskw.xyz@gmail.com>
Chris Patterson <chrispat@github.com>
Christopher Yeleighton <ne01026@shark.2a.pl>
Christy Perez <christy@linux.vnet.ibm.com>
Chuanying Du <cydu@google.com>
Clayton Coleman <ccoleman@redhat.com>
Collin Shoop <cshoop@digitalocean.com>
Corey Quon <corey.quon@gmail.com>
Cory Snider <csnider@mirantis.com>
CrazyMax <github@crazymax.dev>
cressie176 <github@stephen-cresswell.net>
Cristian Staretu <cristian.staretu@gmail.com>
cui fliter <imcusg@gmail.com>
cuiwei13 <cuiwei13@pku.edu.cn>
cyli <cyli@twistedmatrix.com>
Daehyeok Mun <daehyeok@gmail.com>
Daisuke Fujita <dtanshi45@gmail.com>
Damien Mathieu <dmathieu@salesforce.com>
Dan Fredell <furtchet@gmail.com>
Dan Walsh <dwalsh@redhat.com>
Daniel Helfand <helfand.4@gmail.com>
Daniel Huhn <daniel@danielhuhn.de>
Daniel Menet <membership@sontags.ch>
Daniel Mizyrycki <mzdaniel@glidelink.net>
Daniel Nephin <dnephin@gmail.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Danila Fominykh <dancheg97@fmnx.su>
Darren Shepherd <darren@rancher.com>
Dave <david.warshaw@gmail.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Calavera <david.calavera@gmail.com>
David Justice <david@devigned.com>
David Karlsson <david.karlsson@docker.com>
David Lawrence <david.lawrence@docker.com>
David Luu <david@davidluu.info>
David Mackey <tdmackey@booleanhaiku.com>
David van der Spek <vanderspek.david@gmail.com>
David Verhasselt <david@crowdway.com>
David Wu <dwu7401@gmail.com>
David Xia <dxia@spotify.com>
Dawn W Docker <dawn.wood@users.noreply.github.com>
ddelange <14880945+ddelange@users.noreply.github.com>
Dejan Golja <dejan@golja.org>
Denis Andrejew <da.colonel@gmail.com>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Derek <crq@kernel.org>
Derek McGowan <derek@mcg.dev>
Deshi Xiao <xiaods@gmail.com>
Dimitar Kostadinov <dimitar.kostadinov@sap.com>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Djibril Koné <kone.djibril@gmail.com>
dmp <dmp@loaner.local>
Don Bowman <don@agilicus.com>
Don Kjer <don.kjer@gmail.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
drornir <drornir@users.noreply.github.com>
duanhongyi <duanhongyi@doopai.com>
ducksecops <daniel@ducksecops.uk>
E. M. Bray <erik.m.bray@gmail.com>
Edgar Lee <edgar.lee@docker.com>
Elliot Pahl <elliot.pahl@gmail.com>
elsanli(李楠) <elsanli@tencent.com>
Elton Stoneman <elton@sixeyed.com>
Emmanuel Briney <emmanuel.briney@docker.com>
Eng Zer Jun <engzerjun@gmail.com>
Eohyung Lee <liquidnuker@gmail.com>
Eric Yang <windfarer@gmail.com>
Erica Windisch <erica@windisch.us>
Erik Hollensbe <github@hollensbe.org>
Etki <etki@etki.me>
Eugene Lubarsky <eug48@users.noreply.github.com>
eyjhb <eyjhbb@gmail.com>
eyjhbb@gmail.com <eyjhbb@gmail.com>
Fabio Berchtold <jamesclonk@jamesclonk.ch>
Fabio Falci <fabiofalci@gmail.com>
Fabio Huser <fabio@fh1.ch>
farmerworking <farmerworking@gmail.com>
fate-grand-order <chenjg@harmonycloud.cn>
Felix Bünemann <buenemann@louis.info>
Felix Yan <felixonmars@archlinux.org>
Feng Honglin <tifayuki@gmail.com>
Fernando Mayo Fernandez <fernando@undefinedlabs.com>
Flavian Missi <fmissi@redhat.com>
Florentin Raud <florentin.raud@gmail.com>
forkbomber <forkbomber@users.noreply.github.com>
Frank Chen <frankchn@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Gabor Nagy <mail@aigeruth.hu>
gabriell nascimento <gabriell@bluesoft.com.br>
Gaetan <gdevillele@gmail.com>
gary schaetz <gary@schaetzkc.com>
gbarr01 <gwendolynne.barr@docker.com>
Geoffrey Hausheer <rc2012@pblue.org>
ghodsizadeh <mehdi.ghodsizadeh@gmail.com>
Giovanni Toraldo <giovanni.toraldo@eng.it>
Gladkov Alexey <agladkov@redhat.com>
Gleb M Borisov <borisov.gleb@gmail.com>
Gleb Schukin <gschukin@ptsecurity.com>
glefloch <glfloch@gmail.com>
Glyn Owen Hanmer <1295698+glynternet@users.noreply.github.com>
gotgelf <gotgelf@gmail.com>
Grachev Mikhail <work@mgrachev.com>
Grant Watters <grant.watters@docker.com>
Greg Rebholz <gregrebholz@gmail.com>
Guillaume J. Charmes <charmes.guillaume@gmail.com>
Guillaume Rose <guillaume.rose@docker.com>
Gábor Lipták <gliptak@gmail.com>
harche <p.harshal@gmail.com>
hasheddan <georgedanielmangum@gmail.com>
Hayley Swimelar <hswimelar@gmail.com>
Helen-xie <xieyulin821@harmonycloud.cn>
Henri Gomez <henri.gomez@gmail.com>
Honglin Feng <tifayuki@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Huu Nguyen <whoshuu@gmail.com>
ialidzhikov <i.alidjikov@gmail.com>
Ian Babrou <ibobrik@gmail.com>
iasoon <ilion.beyst@gmail.com>
igayoso <igayoso@gmail.com>
Igor Dolzhikov <bluesriverz@gmail.com>
Igor Morozov <igmorv@gmail.com>
Ihor Dvoretskyi <ihor@linux.com>
Ilion Beyst <ilion.beyst@gmail.com>
Ina Panova <ipanova@redhat.com>
Irene Diez <idiez@redhat.com>
Ismail Alidzhikov <i.alidjikov@gmail.com>
Jack Baines <jack.baines@uk.ibm.com>
Jack Griffin <jackpg14@gmail.com>
Jacob Atzen <jatzen@gmail.com>
Jake Moshenko <jake@devtable.com>
Jakob Ackermann <das7pad@outlook.com>
Jakub Mikulas <jakub@mikul.as>
James Findley <jfindley@fastmail.com>
James Hewitt <james.hewitt@uk.ibm.com>
James Lal <james@lightsofapollo.com>
Jason Freidman <jason.freidman@gmail.com>
Jason Heiss <jheiss@aput.net>
Javier Palomo Almena <javier.palomo.almena@gmail.com>
jdolitsky <393494+jdolitsky@users.noreply.github.com>
Jeff Nickoloff <jeff@allingeek.com>
Jeffrey van Gogh <jvg@google.com>
jerae-duffin <83294991+jerae-duffin@users.noreply.github.com>
Jeremy THERIN <jtherin@scaleway.com>
Jesse Brown <jabrown85@gmail.com>
Jesse Haka <haka.jesse@gmail.com>
Jessica Frazelle <jess@oxide.computer>
jhaohai <jhaohai@foxmail.com>
Jianqing Wang <tsing@jianqing.org>
Jihoon Chung <jihoon@gmail.com>
Jim Galasyn <jim.galasyn@docker.com>
Joao Fernandes <joaofnfernandes@gmail.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
John Howard <github@lowenna.com>
John Mulhausen <john@docker.com>
John Starks <jostarks@microsoft.com>
Jon Johnson <jonjohnson@google.com>
Jon Poler <jonathan.poler@apcera.com>
Jonas Hecht <jonas.hecht@codecentric.de>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Lee <jonjohn1232009@gmail.com>
Jonathan Rudenberg <jonathan@titanous.com>
Jordan Liggitt <jliggitt@redhat.com>
Jose D. Gomez R <jose.gomez@suse.com>
Josh Chorlton <josh.chorlton@docker.com>
Josh Dolitsky <josh@dolit.ski>
Josh Hawn <jlhawn@berkeley.edu>
Josiah Kiehl <jkiehl@riotgames.com>
Joyce Brum <joycebrumu.u@gmail.com>
João Pereira <484633+joaodrp@users.noreply.github.com>
Julien Bordellier <1444415+jstoja@users.noreply.github.com>
Julien Fernandez <julien.fernandez@gmail.com>
Justas Brazauskas <brazauskasjustas@gmail.com>
Justin Cormack <justin.cormack@docker.com>
Justin I. Nevill <JustinINevill@users.noreply.github.com>
Justin Santa Barbara <justin@fathomdb.com>
kaiwentan <kaiwentan@harmonycloud.cn>
Ke Xu <leonhartx.k@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Ken Cochrane <KenCochrane@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Kevin Lin <kevin@kelda.io>
Kevin Robatel <kevinrob2@gmail.com>
Kira <me@imkira.com>
Kirat Singh <kirat.singh@gmail.com>
L-Hudson <44844738+L-Hudson@users.noreply.github.com>
Lachlan Cooper <lachlancooper@gmail.com>
Laura Brehm <laurabrehm@hey.com>
Lei Jitang <leijitang@huawei.com>
Lenny Linux <tippexs91@googlemail.com>
Leonardo Azize Martins <lazize@users.noreply.github.com>
leonstrand <leonstrand@gmail.com>
Li Yi <denverdino@gmail.com>
Liam White <liamwhite@uk.ibm.com>
libo.huang <huanglibo2010@gmail.com>
LingFaKe <lingfake@huawei.com>
Liron Levin <liron@twistlock.com>
lisong <lisong@cdsunrise.net>
Littlemoon917 <18084421+Littlemoon917@users.noreply.github.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
liyongxin <yxli@alauda.io>
Lloyd Ramey <lnr0626@gmail.com>
lostsquirrel <lostsquirreli@hotmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luca Bruno <lucab@debian.org>
Lucas França de Oliveira <lucasfdo@palantir.com>
Lucas Santos <lhs.santoss@gmail.com>
Luis Lobo Borobia <luislobo@gmail.com>
Luke Carpenter <x@rubynerd.net>
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
Makoto Oda <truth_jp_4133@yahoo.co.jp>
mallchin <mallchin@mac.com>
Manish Tomar <manish.tomar@docker.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Martins <marcus@docker.com>
Maria Bermudez <bermudez.mt@gmail.com>
Mark Sagi-Kazar <mark.sagikazar@gmail.com>
Mary Anthony <mary@docker.com>
Masataka Mizukoshi <m.mizukoshi.wakuwaku@gmail.com>
Matin Rahmanian <itsmatinx@gmail.com>
MATSUMOTO TAKEAKI <takeaki.matsumoto@linecorp.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Linville <matt@linville.me>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Matt Tescher <matthew.tescher@docker.com>
Matthew Balvanz <matthew.balvanz@workiva.com>
Matthew Green <greenmr@live.co.uk>
Matthew Riley <mattdr@google.com>
Maurice Sotzny <ailuridae@users.noreply.github.com>
Meaglith Ma <genedna@gmail.com>
Michael Bonfils <bonfils.michael@protonmail.com>
Michael Crosby <crosbymichael@gmail.com>
Michael Prokop <mika@grml.org>
Michael Vetter <jubalh@iodoru.org>
Michal Fojtik <mfojtik@redhat.com>
Michal Gebauer <mishak@mishak.net>
Michal Guerquin <michalg@allenai.org>
Michal Minar <miminar@redhat.com>
Mike Brown <brownwm@us.ibm.com>
Mike Lundy <mike@fluffypenguin.org>
Mike Truman <miketruman42@gmail.com>
Milos Gajdos <milosthegajdos@gmail.com>
Miquel Sabaté <msabate@suse.com>
mlmhl <409107750@qq.com>
Monika Katiyar <monika@jeavio.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
mqliang <mqliang.zju@gmail.com>
Muesli <solom.emmanuel@gmail.com>
Nan Monnand Deng <monnand@gmail.com>
Nat Zimmermann <ntzm@users.noreply.github.com>
Nathan Sullivan <nathan@nightsys.net>
Naveed Jamil <naveed.jamil@tenpearl.com>
Neil Wilson <neil@aldur.co.uk>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nicolas De Loof <nicolas.deloof@gmail.com>
Nikita Tarasov <nikita@mygento.ru>
ning xie <andy.xning@gmail.com>
Nishant Totla <nishanttotla@gmail.com>
Noah Treuhaft <noah.treuhaft@docker.com>
Novak Ivanovski <novakivanovski@gmail.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Nycholas de Oliveira e Oliveira <nycholas@gmail.com>
Oilbeater <liumengxinfly@gmail.com>
Oleg Bulatov <oleg@bulatov.me>
olegburov <oleg.burov@outlook.com>
Olivier <o+github@gambier.email>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
ollypom <oppomeroy@gmail.com>
Omer Cohen <git@omer.io>
Oscar Caballero <ocaballero@opensistemas.com>
Owen W. Taylor <otaylor@fishsoup.net>
paigehargrave <Paige.hargrave@docker.com>
Parth Mehrotra <parth@mehrotra.me>
Pascal Borreli <pascal@borreli.com>
Patrick Devine <patrick.devine@docker.com>
Patrick Easters <peasters@redhat.com>
Paul Cacheux <paul.cacheux@datadoghq.com>
Pavel Antonov <ddc67cd@gmail.com>
Paweł Gronowski <pawel.gronowski@docker.com>
Per Lundberg <perlun@gmail.com>
Peter Choi <reikani@Peters-MacBook-Pro.local>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Kokot <peterkokot@gmail.com>
Phil Estes <estesp@gmail.com>
Philip Misiowiec <philip@atlashealth.com>
Pierre-Yves Ritschard <pyr@spootnik.org>
Pieter Scheffers <pieter.scheffers@gmail.com>
Qiang Huang <h.huangqiang@huawei.com>
Qiao Anran <qiaoanran@gmail.com>
Radon Rosborough <radon.neon@gmail.com>
Randy Barlow <randy@electronsweatshop.com>
Raphaël Enrici <raphael@root-42.com>
Ricardo Maraschini <ricardo.maraschini@gmail.com>
Richard Scothern <richard.scothern@gmail.com>
Rick Wieman <git@rickw.nl>
Rik Nijessen <rik@keefo.nl>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rober Morales-Chaparro <rober.morales@rstor.io>
Robert Kaussow <mail@geeklabor.de>
Robert Steward <speaktorob@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
ROY <qqbuby@gmail.com>
Rui Cao <ruicao@alauda.io>
ruicao <ruicao@alauda.io>
Rusty Conover <rusty@luckydinosaur.com>
Ryan Abrams <rdabrams@gmail.com>
Ryan Thomas <rthomas@atlassian.com>
sakeven <jc5930@sina.cn>
Sam Alba <sam.alba@gmail.com>
Samuel Karp <skarp@amazon.com>
sangluo <sangluo@pinduoduo.com>
Santiago Torres <torresariass@gmail.com>
Sargun Dhillon <sargun@sargun.me>
sayboras <sayboras@yahoo.com>
Sean Boran <Boran@users.noreply.github.com>
Sean P. Kane <spkane00@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastien Coavoux <s.coavoux@free.fr>
Serge Dubrouski <sergeyfd@gmail.com>
Sevki Hasirci <sevki@cloudflare.com>
Sharif Nassar <sharif@mrwacky.com>
Shawn Chen <chen8132@gmail.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shawnpku <chen8132@gmail.com>
Shengjing Zhu <zhsj@debian.org>
Shiela M Parker <smp13@live.com>
Shishir Mahajan <shishir.mahajan@redhat.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>
Simon <crydotsnakegithub@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Simone Locci <simone.locci@eng.it>
Smasherr <soundcracker@gmail.com>
Solomon Hykes <solomon@dagger.io>
Sora Morimoto <sora@morimoto.io>
spacexnice <yaoyao.xyy@alibaba-inc.com>
Spencer Rinehart <anubis@overthemonkey.com>
srajmane <31947381+srajmane@users.noreply.github.com>
Srini Brahmaroutu <srbrahma@us.ibm.com>
Stan Hu <stanhu@gmail.com>
Stefan Lörwald <10850250+stefanloerwald@users.noreply.github.com>
Stefan Majewsky <stefan.majewsky@sap.com>
Stefan Nica <snica@suse.com>
Stefan Weil <sw@weilnetz.de>
Stephen Day <stevvooe@gmail.com>
Steve Lasker <stevenlasker@hotmail.com>
Steven Hanna <stevenhanna6@gmail.com>
Steven Kalt <SKalt@users.noreply.github.com>
Steven Taylor <steven.taylor@me.com>
stonezdj <stonezdj@gmail.com>
sun jian <cnhttpd@gmail.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
syntaxkim <40621244+syntaxkim@users.noreply.github.com>
T N <tnir@users.noreply.github.com>
t-eimizu <t-eimizu@aim.ac>
Tariq Ibrahim <tariq181290@gmail.com>
TaylorKanper <tony_kanper@hotmail.com>
Ted Reed <ted.reed@gmail.com>
Terin Stock <terinjokes@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Berger <loki@lokis-chaos.de>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
tifayuki <tifayuki@gmail.com>
Tiger Kaovilai <tkaovila@redhat.com>
Tobias Fuhrimann <mastertinner@users.noreply.github.com>
Tobias Schwab <tobias.schwab@dynport.de>
Tom Hayward <thayward@infoblox.com>
Tom Hu <tomhu1096@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Holdstock-Brown <tony@docker.com>
Tosone <i@tosone.cn>
Trapier Marshall <trapier@users.noreply.github.com>
Trevor Pounds <trevor.pounds@gmail.com>
Trevor Wood <Trevor.G.Wood@gmail.com>
Troels Thomsen <troels@thomsen.io>
uhayate <uhayate.gong@daocloud.io>
Usha Mandya <47779042+usha-mandya@users.noreply.github.com>
Usha Mandya <usha.mandya@docker.com>
Vaidas Jablonskis <jablonskis@gmail.com>
Vega Chou <VegeChou@users.noreply.github.com>
Veres Lajos <vlajos@gmail.com>
Victor Vieux <victorvieux@gmail.com>
Victoria Bialas <victoria.bialas@docker.com>
Vidar <vl@ez.no>
Viktor Stanchev <me@viktorstanchev.com>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent.demeester@docker.com>
Vincent Giersch <vincent@giersch.fr>
Vishesh Jindal <vishesh92@gmail.com>
W. Trevor King <wking@tremily.us>
Wang Jie <wangjie5@chinaskycloud.com>
Wang Yan <wangyan@vmware.com>
Wassim Dhif <wassimdhif@gmail.com>
wayne <wayne.warren.s@gmail.com>
Wei Fu <fuweid89@gmail.com>
Wei Meng <wemeng@microsoft.com>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
Wen-Quan Li <legendarilylwq@gmail.com>
Wenkai Yin <yinw@vmware.com>
william wei <1342247033@qq.com>
xg.song <xg.song@venusource.com>
|
||||
xiekeyang <xiekeyang@huawei.com>
|
||||
Xueshan Feng <xueshan.feng@gmail.com>
|
||||
Yann ROBERT <yann.robert@anantaplex.fr>
|
||||
Yannick Fricke <YannickFricke@users.noreply.github.com>
|
||||
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
|
||||
yixi zhang <yixi@memsql.com>
|
||||
Yong Tang <yong.tang.github@outlook.com>
|
||||
Yong Wen Chua <lawliet89@users.noreply.github.com>
|
||||
Yongxin Li <yxli@alauda.io>
|
||||
Yu Wang <yuwa@microsoft.com>
|
||||
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||
YuJie <390282283@qq.com>
|
||||
yuzou <zouyu7@huawei.com>
|
||||
Zhang Wei <zhangwei555@huawei.com>
|
||||
zhipengzuo <zuozhipeng@baidu.com>
|
||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||
zounengren <zounengren@cmss.chinamobile.com>
|
||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|

129 BUILDING.md Normal file
@@ -0,0 +1,129 @@

# Building the registry source

## Use-case

This is useful if you intend to actively work on the registry.

### Alternatives

Most people should use prebuilt images, for example, the [Registry docker image](https://hub.docker.com/r/library/registry/) provided by Docker.

People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.

The latest updates to the `main` branch are automatically pushed to the [distribution Docker Hub repository](https://hub.docker.com/r/distribution/distribution) and tagged with the `edge` tag.

### Gotchas

You are expected to know your way around with `go` & `git`.

If you are a casual user with no development experience and no preliminary knowledge of Go, building from source is probably not a good solution for you.

## Configure the development environment

The first prerequisite of properly building distribution targets is to have a Go development environment set up. Please follow [How to Write Go Code](https://go.dev/doc/code) for proper setup.

Next, fetch the code from the repository using git:

    git clone https://github.com/distribution/distribution
    cd distribution

If you are planning to create a pull request with changes, you may want to clone directly from your [fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/about-forks).

## Build and run from source

First, build the binaries:

    $ make
    + bin/registry
    + bin/digest
    + bin/registry-api-descriptor-template
    + binaries

Now create the directory for the registry data (this might require you to set permissions properly):

    mkdir -p /var/lib/registry

... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location.

The `registry` binary can then be run with the following:

    $ ./bin/registry --version
    ./bin/registry github.com/distribution/distribution/v3 v2.7.0-1993-g8857a194

The registry can be run with a development config using the following incantation:

    $ ./bin/registry serve cmd/registry/config-dev.yml
    INFO[0000] debug server listening :5001
    WARN[0000] No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable. environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] endpoint local-5003 disabled, skipping environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] endpoint local-8083 disabled, skipping environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] using inmemory blob descriptor cache environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194
    INFO[0000] providing prometheus metrics on /metrics
    INFO[0000] listening on [::]:5000 environment=development go.version=go1.18.3 instance.id=e837df62-a66c-4e04-a014-b063546e82e0 service=registry version=v2.7.0-1993-g8857a194

If it is working, you should see the above log messages.

### Build reference

The regular `go` commands, such as `go test`, should work per package.

A `Makefile` has been provided as a convenience to support repeatable builds.

Run `make` to build the binaries:

    $ make
    + bin/registry
    + bin/digest
    + bin/registry-api-descriptor-template
    + binaries

The above provides a repeatable build using the contents of the vendor directory. We can verify this worked by running the registry binary generated in the "./bin" directory:

    $ ./bin/registry --version
    ./bin/registry github.com/distribution/distribution v2.0.0-alpha.2-80-g16d8b2c.m

Run `make test` to run all of the tests.

Run `make validate` to run the validators, including the linter and vendor validation. You must have Docker with the buildx plugin installed to run the validators.

### Optional build tags

Optional [build tags](http://golang.org/pkg/go/build/) can be provided using the environment variable `BUILDTAGS`.

<dl>
<dt>noresumabledigest</dt>
<dd>Compiles without resumable digest support</dd>
</dl>

### Local cloud storage environment

You can run an S3 API compatible storage locally with [minio](https://min.io/).

You must have a [docker compose](https://docs.docker.com/compose/) compatible tool installed on your workstation.

Start the local cloud environment:
```
make start-cloud-storage
```
There is a sample registry configuration file that lets you point the registry to the started storage:
```
AWS_ACCESS_KEY=distribution \
AWS_SECRET_KEY=password \
AWS_REGION=us-east-1 \
S3_BUCKET=images-local \
S3_ENCRYPT=false \
REGION_ENDPOINT=http://127.0.0.1:9000 \
S3_SECURE=false \
./bin/registry serve tests/conf-local-cloud.yml
```
Stop the local storage when done:
```
make stop-cloud-storage
```
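If the local storage gets into a bad state between runs, the `Makefile` (included later in this diff) also has a reset target that stops the containers, wipes the minio data directory, and starts minio again:
```
make reset-cloud-storage
```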

5 CODE-OF-CONDUCT.md Normal file
@@ -0,0 +1,5 @@

# Code of Conduct

We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.

129 CONTRIBUTING.md Normal file
@@ -0,0 +1,129 @@

# Contributing to the registry

## Before reporting an issue...

### If your problem is with...

- automated builds or your [Docker Hub](https://hub.docker.com/) account
  - Report it to [Hub Support](https://hub.docker.com/support/)
- Distributions of Docker for desktop or Linux
  - Report [Mac Desktop issues](https://github.com/docker/for-mac)
  - Report [Windows Desktop issues](https://github.com/docker/for-win)
  - Report [Linux issues](https://github.com/docker/for-linux)

### If you...

- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is

Please ask first in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)

### Reporting security issues

The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!

Please **DO NOT** file a public issue; instead send your report privately to [cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).

## Reporting an issue properly

By following these simple rules you will get better and faster feedback on your issue.

- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.

### If you have not found an existing issue that describes your problem:

1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of (or similar for other container tools):
   - `docker version`
   - `docker info`
   - `docker exec <registry-container> registry --version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry

## Contributing Code

Contributions should be made via pull requests. Pull requests will be reviewed by one or more maintainers or reviewers and merged when acceptable.

You should follow the basic GitHub workflow:

1. Use your own [fork](https://help.github.com/en/articles/about-forks)
2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
3. Test your code
4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)

Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) for tips on creating a successful contribution.

## Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions).

If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`, as shown in the example below.
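
A minimal sketch (hypothetical name, email, and commit message; substitute your own):

    $ git config user.name "Joe Smith"
    $ git config user.email joe.smith@email.com
    $ git commit -s -m "registry: fix push error handling"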

60 Dockerfile Normal file
@@ -0,0 +1,60 @@

# syntax=docker/dockerfile:1

ARG GO_VERSION=1.22.4
ARG ALPINE_VERSION=3.20
ARG XX_VERSION=1.2.1

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
COPY --from=xx / /
RUN apk add --no-cache bash coreutils file git
ENV GO111MODULE=auto
ENV CGO_ENABLED=0
WORKDIR /src

FROM base AS version
ARG PKG=github.com/distribution/distribution/v3
RUN --mount=target=. \
    VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
    echo "-X ${PKG}/version.version=${VERSION#v} -X ${PKG}/version.revision=${REVISION} -X ${PKG}/version.mainpkg=${PKG}" | tee /tmp/.ldflags; \
    echo -n "${VERSION}" | tee /tmp/.version;

FROM base AS build
ARG TARGETPLATFORM
ARG LDFLAGS="-s -w"
ARG BUILDTAGS=""
RUN --mount=type=bind,target=/src \
    --mount=type=cache,target=/root/.cache/go-build \
    --mount=target=/go/pkg/mod,type=cache \
    --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \
    set -x ; xx-go build -tags "${BUILDTAGS}" -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \
    && xx-verify --static /usr/bin/registry

FROM scratch AS binary
COPY --from=build /usr/bin/registry /

FROM base AS releaser
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /work
RUN --mount=from=binary,target=/build \
    --mount=type=bind,target=/src \
    --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \
    VERSION=$(cat /tmp/.version) \
    && mkdir -p /out \
    && cp /build/registry /src/README.md /src/LICENSE . \
    && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \
    && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256"

FROM scratch AS artifact
COPY --from=releaser /out /

FROM alpine:${ALPINE_VERSION}
RUN apk add --no-cache ca-certificates
COPY cmd/registry/config-dev.yml /etc/distribution/config.yml
COPY --from=binary /registry /bin/registry
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/distribution/config.yml"]
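
A note on consuming the multi-stage Dockerfile above: the `binary` and `artifact` stages build on `scratch` and are meant to be exported rather than run. A sketch using standard `docker buildx build` flags (the output directories are arbitrary):

```
# extract just the static registry binary for the current platform
docker buildx build --target binary -o ./bin .

# produce the release tarball and its .sha256 checksum
docker buildx build --target artifact -o ./out .
```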

144 GOVERNANCE.md Normal file
@@ -0,0 +1,144 @@

# distribution/distribution Project Governance

The Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.

For specific guidance on practical contribution steps please see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.

## Maintainership

There are different types of maintainers, with different responsibilities, but all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate. It's easy to appreciate a really cool and technically advanced feature. It's harder to appreciate the absence of bugs, the slow but steady improvement in stability, or the reliability of a release process. But those things distinguish a good project from a great one.

## Reviewers

A reviewer is a core role within the project. They share in reviewing issues and pull requests and their LGTM counts towards the required LGTM count to merge a code change into the project.

Reviewers are part of the organization but do not have write access. Becoming a reviewer is a core aspect in the journey to becoming a maintainer.

## Adding maintainers

Maintainers are first and foremost contributors that have shown they are committed to the long term success of a project. Contributors wanting to become maintainers are expected to be deeply involved in contributing code, pull request review, and triage of issues in the project for more than three months.

Just contributing does not make you a maintainer; it is about building trust with the current maintainers of the project and being a person that they can depend on and trust to make decisions in the best interest of the project.

Periodically, the existing maintainers curate a list of contributors that have shown regular activity on the project over the prior months. From this list, maintainer candidates are selected and proposed in a pull request or a maintainers communication channel.

After a candidate has been announced to the maintainers, the existing maintainers are given five business days to discuss the candidate, raise objections and cast their vote. Votes may take place on the communication channel or via pull request comment. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing list. The reviewer role has the same process but only requires 33% of current maintainers. Only maintainers of the repository that the candidate is proposed for are allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to invite the candidate to open a pull request that adds the contributor to the MAINTAINERS file. The voting process may take place inside a pull request if a maintainer has already discussed the candidacy with the candidate and a maintainer is willing to be a sponsor by opening the pull request. The candidate becomes a maintainer once the pull request is merged.

## Stepping down policy

Life priorities, interests, and passions can change. If you're a maintainer but feel you must remove yourself from the list, inform other maintainers that you intend to step down, and if possible, help find someone to pick up your work. At the very least, ensure your work can be continued where you left off.

After you've informed other maintainers, create a pull request to remove yourself from the MAINTAINERS file.

## Removal of inactive maintainers

Similar to the procedure for adding new maintainers, existing maintainers can be removed from the list if they do not show significant activity on the project. Periodically, the maintainers review the list of maintainers and their activity over the last three months.

If a maintainer has shown insufficient activity over this period, a neutral person will contact the maintainer to ask if they want to continue being a maintainer. If the maintainer decides to step down as a maintainer, they open a pull request to be removed from the MAINTAINERS file.

If the maintainer wants to remain a maintainer, but is unable to perform the required duties, they can be removed with a vote of at least 66% of the current maintainers. In this case, maintainers should first propose the change to maintainers via the maintainers communication channel, then open a pull request for voting. The voting period is five business days. The voting pull request should not come as a surprise to any maintainer, and any discussion related to performance must not take place on the pull request.

## How are decisions made?

CNCF distribution is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, including its philosophy, design, road map, and APIs. *If it's part of the project, it's in the repo. If it's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to the API specification. A philosophy change is a change to the philosophy manifesto, and so on.

All decisions affecting distribution, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature of the pull request and which areas of the project it affects.

## Helping contributors with the DCO

The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work) requirement is not intended as a roadblock or speed bump.

Some contributors are not as familiar with `git`, or have used a web based editor, and thus asking them to `git commit --amend -s` is not the best way forward.

In this case, maintainers can update the commits based on clause (c) of the DCO. The most trivial way for a contributor to allow the maintainer to do this is to add a DCO signature in a pull request's comment, or a maintainer can simply note that the change is sufficiently trivial that it does not substantially change the existing contribution - e.g., a spelling change.

When you add someone's DCO, please also add your own to keep a log, as in the sketch below.
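
A sketch of what that can look like on the maintainer's side (hypothetical contributor name and email; `--trailer` requires git 2.32 or newer):

    $ git commit --amend --no-edit -s \
        --trailer "Signed-off-by: Jane Contributor <jane@example.com>"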

## I'm a maintainer. Should I make pull requests too?

Yes. Nobody should ever push to master directly. All changes should be made through a pull request.

## Conflict Resolution

If you have a technical dispute that you feel has reached an impasse with a subset of the community, any contributor may open an issue, specifically calling for a resolution vote of the current core maintainers to resolve the dispute. The same voting quorum required (2/3) for adding and removing maintainers applies to conflict resolution.

202 LICENSE Normal file
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

26 MAINTAINERS Normal file
@@ -0,0 +1,26 @@

# Distribution project maintainers & reviewers
#
# See GOVERNANCE.md for maintainer versus reviewer roles
#
# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
# GitHub ID, Name, Email address
"chrispat","Chris Patterson","chrispat@github.com"
"clarkbw","Bryan Clark","clarkbw@github.com"
"corhere","Cory Snider","csnider@mirantis.com"
"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
"heww","He Weiwei","hweiwei@vmware.com"
"joaodrp","João Pereira","jpereira@gitlab.com"
"justincormack","Justin Cormack","justin.cormack@docker.com"
"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
"sargun","Sargun Dhillon","sargun@sargun.me"
"wy65701436","Wang Yan","wangyan@vmware.com"
"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
#
# REVIEWERS
# GitHub ID, Name, Email address
"dmcgowan","Derek McGowan","derek@mcgstyle.net"
"stevvooe","Stephen Day","stevvooe@gmail.com"
"thajeztah","Sebastiaan van Stijn","github@gone.nl"
"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
"Jamstah", "James Hewitt", "james.hewitt@gmail.com"

186 Makefile Normal file
@@ -0,0 +1,186 @@

.DEFAULT_GOAL := help

# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))

# Used to populate version variable in main package.
VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
REVISION ?= $(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)

# default compose command
COMPOSE ?= docker compose

PKG=github.com/distribution/distribution/v3

# Project packages.
PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/)
INTEGRATION_PACKAGE=${PKG}
COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES})

IMAGE_REPO ?= distribution/distribution
IMAGE_TAG ?= latest
IMAGE_NAME ?= $(IMAGE_REPO):$(IMAGE_TAG)

# Project binaries.
COMMANDS=registry digest registry-api-descriptor-template

# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
	GO_GCFLAGS=-gcflags "-N -l"
	VERSION:="$(VERSION)-noopt"
endif

WHALE = "+"

# Go files
#
TESTFLAGS_RACE=
GOFILES=$(shell find . -type f -name '*.go')
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
GO_LDFLAGS=-ldflags '-extldflags "-Wl,-z,now" -s -w -X $(PKG)/version.version=$(VERSION) -X $(PKG)/version.revision=$(REVISION) -X $(PKG)/version.mainpkg=$(PKG) $(EXTRA_LDFLAGS)'

BINARIES=$(addprefix bin/,$(COMMANDS))

# Flags passed to `go test`
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
TESTFLAGS_PARALLEL ?= 8

.PHONY: all build binaries clean test test-race test-full integration test-coverage validate lint validate-git validate-vendor vendor mod-outdated image validate-authors authors
.DEFAULT: all

.PHONY: FORCE
FORCE:

##@ Build

# This only needs to be generated by hand when cutting full releases.
version/version.go:
	@echo "$(WHALE) $@"
	./version/version.sh > $@

bin/%: cmd/% FORCE ## build individual binary
	@echo "$(WHALE) $@${BINARY_SUFFIX}"
	@go build -buildmode=pie ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} --ldflags '-extldflags "-Wl,-z,now" -s' ${GO_TAGS} ./$<

binaries: $(BINARIES) ## build binaries
	@echo "$(WHALE) $@"

build: ## build go packages
	@echo "$(WHALE) $@"
	@go build -buildmode=pie ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} --ldflags '-extldflags "-Wl,-z,now" -s' ${GO_TAGS} $(PACKAGES)

image: ## build docker image IMAGE_NAME=<name>
	docker buildx bake --set "*.tags=${IMAGE_NAME}" image-local

clean: ## clean up binaries
	@echo "$(WHALE) $@"
	@rm -f $(BINARIES)

vendor: ## update vendor
	$(eval $@_TMP_OUT := $(shell mktemp -d -t buildx-output.XXXXXXXXXX))
	docker buildx bake --set "*.output=$($@_TMP_OUT)" update-vendor
	rm -rf ./vendor
	cp -R "$($@_TMP_OUT)"/out/* .
	rm -rf $($@_TMP_OUT)/*

mod-outdated: ## check outdated dependencies
	docker buildx bake $@

authors: ## generate authors
	docker buildx bake $@

##@ Test

test: ## run tests, except integration test with test.short
	@echo "$(WHALE) $@"
	@go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})

test-race: ## run tests, except integration test with test.short and race
	@echo "$(WHALE) $@"
	@go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})

test-full: ## run tests, except integration tests
	@echo "$(WHALE) $@"
	@go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})

integration: ## run integration tests
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE}

test-coverage: ## run unit tests and generate test coverprofiles
	@echo "$(WHALE) $@"
	@rm -f coverage.txt
	@go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null
	@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \
		go test ${GO_TAGS} ${TESTFLAGS} \
			-cover \
			-coverprofile=profile.out \
			-covermode=atomic $$pkg || exit; \
		if [ -f profile.out ]; then \
			cat profile.out >> coverage.txt; \
			rm profile.out; \
		fi; \
	done )

.PHONY: test-cloud-storage
test-cloud-storage: start-cloud-storage run-s3-tests stop-cloud-storage ## run cloud storage driver tests

.PHONY: start-cloud-storage
start-cloud-storage: ## start local cloud storage (minio)
	$(COMPOSE) -f tests/docker-compose-storage.yml up minio minio-init -d

.PHONY: stop-cloud-storage
stop-cloud-storage: ## stop local cloud storage (minio)
	$(COMPOSE) -f tests/docker-compose-storage.yml down

.PHONY: reset-cloud-storage
reset-cloud-storage: ## reset (stop, delete, start) local cloud storage (minio)
	$(COMPOSE) -f tests/docker-compose-storage.yml down
	@mkdir -p tests/miniodata/distribution
	@rm -rf tests/miniodata/distribution/* tests/miniodata/.minio.sys
	$(COMPOSE) -f tests/docker-compose-storage.yml up minio minio-init -d

.PHONY: run-s3-tests
run-s3-tests: start-cloud-storage ## run S3 storage driver integration tests
	AWS_ACCESS_KEY=distribution \
	AWS_SECRET_KEY=password \
	AWS_REGION=us-east-1 \
	S3_BUCKET=images-local \
	S3_ENCRYPT=false \
	REGION_ENDPOINT=http://127.0.0.1:9000 \
	S3_SECURE=false \
	S3_ACCELERATE=false \
	AWS_S3_FORCE_PATH_STYLE=true \
	go test ${TESTFLAGS} -count=1 ./registry/storage/driver/s3-aws/...

.PHONY: start-e2e-s3-env
start-e2e-s3-env: ## starts E2E S3 storage test environment (S3, Redis, registry)
	$(COMPOSE) -f tests/docker-compose-e2e-cloud-storage.yml up -d

.PHONY: stop-e2e-s3-env
stop-e2e-s3-env: ## stops E2E S3 storage test environment (S3, Redis, registry)
	$(COMPOSE) -f tests/docker-compose-e2e-cloud-storage.yml down

##@ Validate

lint: ## run all linters
	docker buildx bake $@

validate: ## run all validators
	docker buildx bake $@

validate-git: ## validate git
	docker buildx bake $@

validate-vendor: ## validate vendor
	docker buildx bake $@

validate-authors: ## validate authors
	docker buildx bake $@

.PHONY: help
help:
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z0-9_\/%-]+:.*?##/ { printf "  \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
	@echo ""
	@echo "Go binaries: $(BINARIES)"
	@echo "Docker image: $(IMAGE_NAME)"

75 README.md
@@ -1,3 +1,74 @@

# WIP area: this repo is just a fork!

<p align="center">
<img style="align: center; padding-left: 10px; padding-right: 10px; padding-bottom: 10px;" width="238px" height="238px" src="./distribution-logo.svg" />
</p>

Useful things may be published only in [other branches](../../../branches)

[build](https://github.com/distribution/distribution/actions/workflows/build.yml?query=workflow%3Abuild)
[Go Reference](https://pkg.go.dev/github.com/distribution/distribution)
[License](LICENSE)
[Codecov](https://codecov.io/gh/distribution/distribution)
[FOSSA Status](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Fdistribution?ref=badge_shield)
[Conformance](https://github.com/distribution/distribution/actions?query=workflow%3Aconformance)
[OpenSSF Scorecard](https://securityscorecards.dev/viewer/?uri=github.com/distribution/distribution)

The toolset to pack, ship, store, and deliver content.

This repository's main product is the Open Source Registry implementation for storing and distributing container images and other content using the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). The goal of this project is to provide a simple, secure, and scalable base for building a large scale registry solution or running a simple private registry. It is a core library for many registry operators including Docker Hub, GitHub Container Registry, GitLab Container Registry and DigitalOcean Container Registry, as well as the CNCF Harbor Project, and VMware Harbor Registry.

This repository contains the following components:

|**Component**       |Description                                                                                                                                                                                            |
|--------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry**       | An implementation of the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec).                                                                                        |
| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://pkg.go.dev/github.com/distribution/distribution) for details. **Note**: The interfaces for these libraries are **unstable**. |
| **documentation**  | Full documentation is available at [https://distribution.github.io/distribution](https://distribution.github.io/distribution/).                                                                        |

### How does this integrate with Docker, containerd, and other OCI clients?

Clients implement against the OCI specification and communicate with the registry using HTTP. This project contains a client implementation which is currently in use by Docker; however, it is deprecated in favor of the [implementation in containerd](https://github.com/containerd/containerd/tree/master/remotes/docker) and will not support new features.

### What are the long term goals of the Distribution project?

The _Distribution_ project has the further long term goal of providing a secure tool chain for distributing content. The specifications, APIs and tools should be as useful with Docker as they are without.

Our goal is to design a professional grade and extensible content distribution system that allows users to:

* Enjoy an efficient, secured and reliable way to store, manage, package and exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solution through good specs, and solid extension mechanisms.

## Contribution

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. If you are contributing code, see the instructions for [building a development environment](BUILDING.md).

## Communication

For async communication and long running discussions please use issues and pull requests on the GitHub repo. This will be the best place to discuss design and implementation.

For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/) that everyone is welcome to join and chat about development.

## Licenses

The distribution codebase is released under the [Apache 2.0 license](LICENSE). The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License. You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.

16 ROADMAP.md Normal file
@@ -0,0 +1,16 @@

# Roadmap

The Distribution project aims to support the following use cases:

1. A library to support building highly scalable and reliable container registries that can be customised for different backends and use cases. This is used by many of the largest registry operators, including Docker Hub, GitHub, GitLab, Harbor and Digital Ocean.
2. A reference implementation of the OCI registry standards, and an easy way to experiment with new proposals in the registry space as these standards change.
3. Distributed registry tools, such as caching registries and local registries that can be used within clusters for performance and locality use cases.

As every container application needs at least one registry as part of its infrastructure, and more cloud native artifacts are using registries as the basis of their distribution, having a widely used and supported open source registry is important for innovation.

17 SECURITY.md Normal file
@@ -0,0 +1,17 @@

# Security Policy

## Supported Versions

These versions are currently receiving security updates.

| Version      | Supported          | Notes |
| ------------ | ------------------ | ----- |
| 3.0.x (main) | :white_check_mark: | This is the next major version and has not yet been released. |
| 2.8.x        | :white_check_mark: | This is the latest released version. |
| < 2.8        | :x:                | |

## Reporting a Vulnerability

The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!

Please DO NOT file a public issue; instead send your report privately to cncf-distribution-security@lists.cncf.io.
257
blobs.go
Normal file
257
blobs.go
Normal file
|
@ -0,0 +1,257 @@
|
|||
package distribution

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/distribution/reference"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

var (
	// ErrBlobExists returned when blob already exists
	ErrBlobExists = errors.New("blob exists")

	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")

	// ErrBlobUnknown when blob is not found.
	ErrBlobUnknown = errors.New("unknown blob")

	// ErrBlobUploadUnknown returned when upload is not found.
	ErrBlobUploadUnknown = errors.New("blob upload unknown")

	// ErrBlobInvalidLength returned when the blob has an expected length on
	// commit, meaning mismatched with the descriptor or an invalid value.
	ErrBlobInvalidLength = errors.New("blob invalid length")
)

// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
	Digest digest.Digest
	Reason error
}

func (err ErrBlobInvalidDigest) Error() string {
	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
		err.Digest, err.Reason)
}

// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
	From       reference.Canonical
	Descriptor Descriptor
}

func (err ErrBlobMounted) Error() string {
	return fmt.Sprintf("blob mounted from: %v to: %v",
		err.From, err.Descriptor)
}

// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
	// MediaType describes the type of the content. All text based formats are
	// encoded as utf-8.
	MediaType string `json:"mediaType,omitempty"`

	// Digest uniquely identifies the content. A byte stream can be verified
	// against this digest.
	Digest digest.Digest `json:"digest,omitempty"`

	// Size in bytes of content.
	Size int64 `json:"size,omitempty"`

	// URLs contains the source URLs of this content.
	URLs []string `json:"urls,omitempty"`

	// Annotations contains arbitrary metadata relating to the targeted content.
	Annotations map[string]string `json:"annotations,omitempty"`

	// Platform describes the platform which the image in the manifest runs on.
	// This should only be used when referring to a manifest.
	Platform *v1.Platform `json:"platform,omitempty"`

	// NOTE: Before adding a field here, please ensure that all
	// other options have been exhausted. Much of the type relationships
	// depend on the simplicity of this type.
}

// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
	return d
}

// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
	// Stat provides metadata about a blob identified by the digest. If the
	// blob is unknown to the describer, ErrBlobUnknown will be returned.
	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}

// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
	Delete(ctx context.Context, dgst digest.Digest) error
}

// BlobEnumerator enables iterating over blobs from storage.
type BlobEnumerator interface {
	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}

// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
	BlobStatter

	// SetDescriptor assigns the descriptor to the digest. The provided digest and
	// the digest in the descriptor must map to identical content but they may
	// differ on their algorithm. The descriptor must have the canonical
	// digest of the content and the digest algorithm must match the
	// annotator's canonical algorithm.
	//
	// Such a facility can be used to map blobs between digest domains, with
	// the restriction that the algorithm of the descriptor must match the
	// canonical algorithm (ie sha256) of the annotator.
	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error

	// Clear enables descriptors to be unlinked
	Clear(ctx context.Context, dgst digest.Digest) error
}

// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}

// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
	// Get returns the entire blob identified by digest along with the descriptor.
	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)

	// Open provides an [io.ReadSeekCloser] to the blob identified by the provided
	// descriptor. If the blob is not known to the service, an error is returned.
	Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error)
}

// BlobServer can serve blobs via http.
type BlobServer interface {
	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
	// service may decide to redirect the client elsewhere or serve the data
	// directly.
	//
	// This handler only issues successful responses, such as 2xx or 3xx,
	// meaning it serves data or issues a redirect. If the blob is not
	// available, an error will be returned and the caller may still issue a
	// response.
	//
	// The implementation may serve the same blob from a different digest
	// domain. The appropriate headers will be set for the blob, unless they
	// have already been set by the caller.
	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}

// BlobIngester ingests blob data.
type BlobIngester interface {
	// Put inserts the content p into the blob service, returning a descriptor
	// or an error.
	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)

	// Create allocates a new blob writer to add a blob to this service. The
	// returned handle can be written to and later resumed using an opaque
	// identifier. With this approach, one can Close and Resume a BlobWriter
	// multiple times until the BlobWriter is committed or cancelled.
	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)

	// Resume attempts to resume a write to a blob, identified by an id.
	Resume(ctx context.Context, id string) (BlobWriter, error)
}

// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
	Apply(interface{}) error
}

// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
	Mount struct {
		ShouldMount bool
		From        reference.Canonical
		// Stat allows passing a precalculated descriptor to link and return.
		// The blob access check will be skipped if set.
		Stat *Descriptor
	}
}

// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
	io.WriteCloser
	io.ReaderFrom

	// Size returns the number of bytes written to this blob.
	Size() int64

	// ID returns the identifier for this writer. The ID can be used with the
	// Blob service to later resume the write.
	ID() string

	// StartedAt returns the time this blob write was started.
	StartedAt() time.Time

	// Commit completes the blob writer process. The content is verified
	// against the provided provisional descriptor, which may result in an
	// error. Depending on the implementation, written data may be validated
	// against the provisional descriptor fields. If MediaType is not present,
	// the implementation may reject the commit or assign "application/octet-
	// stream" to the blob. The returned descriptor may have a different
	// digest depending on the blob store, referred to as the canonical
	// descriptor.
	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)

	// Cancel ends the blob write without storing any data and frees any
	// associated resources. Any data written thus far will be lost. Cancel
	// implementations should allow multiple calls even after a commit that
	// result in a no-op. This allows use of Cancel in a defer statement,
	// increasing the assurance that it is correctly called.
	Cancel(ctx context.Context) error
}

// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
	BlobStatter
	BlobProvider
	BlobIngester
}

// BlobStore represents the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
	BlobService
	BlobServer
	BlobDeleter
}
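A minimal sketch of how the extension points above fit together. The names `mountFromOption` and `createOrMount` are hypothetical, introduced only for illustration (the codebase ships its own option helpers); only `BlobIngester`, `BlobCreateOption`, `CreateOptions`, and `ErrBlobMounted` come from the file above.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	distribution "github.com/distribution/distribution/v3"
	"github.com/distribution/reference"
)

// mountFromOption is a hypothetical BlobCreateOption asking the ingester to
// cross-repository mount a blob instead of uploading it again. Apply mutates
// the *CreateOptions value that a cooperating ingester passes in.
type mountFromOption struct {
	from reference.Canonical
}

func (o mountFromOption) Apply(v interface{}) error {
	opts, ok := v.(*distribution.CreateOptions)
	if !ok {
		return fmt.Errorf("unsupported options type %T", v)
	}
	opts.Mount.ShouldMount = true
	opts.Mount.From = o.from
	return nil
}

// createOrMount shows the expected control flow: Create either returns a
// writer (upload path) or ErrBlobMounted when the blob was linked directly.
func createOrMount(ctx context.Context, blobs distribution.BlobIngester, from reference.Canonical) error {
	w, err := blobs.Create(ctx, mountFromOption{from: from})
	if err != nil {
		var mounted distribution.ErrBlobMounted
		if errors.As(err, &mounted) {
			fmt.Println("mounted existing blob:", mounted.Descriptor.Digest)
			return nil // nothing to upload
		}
		return err
	}
	// Cancel is documented above as safe even after Commit, so it can be
	// deferred unconditionally.
	defer w.Cancel(ctx)
	// ... write blob data via w, then w.Commit(ctx, provisionalDescriptor) ...
	return nil
}
```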
99
cmd/digest/main.go
Normal file

@@ -0,0 +1,99 @@
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/distribution/distribution/v3/version"
	"github.com/opencontainers/go-digest"

	_ "crypto/sha256"
	_ "crypto/sha512"
)

var (
	algorithm   = digest.Canonical
	showVersion bool
)

type job struct {
	name   string
	reader io.Reader
}

func init() {
	flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)")
	flag.Var(&algorithm, "algorithm", "select the digest algorithm")
	flag.BoolVar(&showVersion, "version", false, "show the version and exit")

	log.SetFlags(0)
	log.SetPrefix(os.Args[0] + ": ")
}

func usage() {
	fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0])
	fmt.Fprint(os.Stderr, `
Calculate the digest of one or more input files, emitting the result
to standard out. If no files are provided, the digest of stdin will
be calculated.

`)
	flag.PrintDefaults()
}

func unsupported() {
	log.Fatalf("unsupported digest algorithm: %v", algorithm)
}

func main() {
	var jobs []job

	flag.Usage = usage
	flag.Parse()
	if showVersion {
		version.PrintVersion()
		return
	}

	var fail bool // if we fail on one item, foul the exit code
	if flag.NArg() > 0 {
		for _, path := range flag.Args() {
			fp, err := os.Open(path)
			if err != nil {
				log.Printf("%s: %v", path, err)
				fail = true
				continue
			}
			defer fp.Close()

			jobs = append(jobs, job{name: path, reader: fp})
		}
	} else {
		// just read stdin
		jobs = append(jobs, job{name: "-", reader: os.Stdin})
	}

	digestFn := algorithm.FromReader

	if !algorithm.Available() {
		unsupported()
	}

	for _, job := range jobs {
		dgst, err := digestFn(job.reader)
		if err != nil {
			log.Printf("%s: %v", job.name, err)
			fail = true
			continue
		}

		fmt.Printf("%v\t%s\n", dgst, job.name)
	}

	if fail {
		os.Exit(1)
	}
}
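For reference, a minimal sketch of the go-digest calls this command wraps; the input string and printed values are illustrative.

```go
package main

import (
	"fmt"
	"strings"

	_ "crypto/sha256" // register sha256 so digest.Canonical is Available()

	"github.com/opencontainers/go-digest"
)

func main() {
	// FromReader is what the tool above calls for every file (or stdin).
	dgst, err := digest.Canonical.FromReader(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst) // sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824

	// A Verifier re-checks content against a previously computed digest.
	verifier := dgst.Verifier()
	verifier.Write([]byte("hello"))
	fmt.Println(verifier.Verified()) // true
}
```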
129
cmd/registry-api-descriptor-template/main.go
Normal file

@@ -0,0 +1,129 @@
// registry-api-descriptor-template uses the APIDescriptor defined in the
// api/v2 package to execute templates passed to the command line.
//
// For example, to generate a new API specification, one would execute the
// following command from the repo root:
//
//	$ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
//
// The templates are passed in the api/v2.APIDescriptor object. Please see the
// package documentation for fields available on that object. The template
// syntax is from Go's standard library text/template package. For information
// on Go's template syntax, please see golang.org/pkg/text/template.
package main

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"text/template"

	"github.com/distribution/distribution/v3/registry/api/errcode"
	v2 "github.com/distribution/distribution/v3/registry/api/v2"
)

var spaceRegex = regexp.MustCompile(`\n\s*`)

func main() {
	if len(os.Args) != 2 {
		log.Fatalln("please specify a template to execute.")
	}

	path := os.Args[1]
	filename := filepath.Base(path)

	funcMap := template.FuncMap{
		"removenewlines": func(s string) string {
			return spaceRegex.ReplaceAllString(s, " ")
		},
		"statustext":    http.StatusText,
		"prettygorilla": prettyGorillaMuxPath,
	}

	tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path))

	data := struct {
		RouteDescriptors []v2.RouteDescriptor
		ErrorDescriptors []errcode.ErrorDescriptor
	}{
		RouteDescriptors: v2.APIDescriptor.RouteDescriptors,
		ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"),
			// The following are part of the specification but provided by errcode default.
			errcode.ErrorCodeUnauthorized.Descriptor(),
			errcode.ErrorCodeDenied.Descriptor(),
			errcode.ErrorCodeUnsupported.Descriptor()),
	}

	if err := tmpl.Execute(os.Stdout, data); err != nil {
		log.Fatalln(err)
	}
}

// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux
// route string, making it suitable for documentation.
func prettyGorillaMuxPath(s string) string {
	// Stateful parser that removes regular expressions from gorilla
	// routes. It correctly handles balanced bracket pairs.

	var output string
	var label string
	var level int

start:
	if s[0] == '{' {
		s = s[1:]
		level++
		goto capture
	}

	output += string(s[0])
	s = s[1:]

	goto end
capture:
	switch s[0] {
	case '{':
		level++
	case '}':
		level--

		if level == 0 {
			s = s[1:]
			goto label
		}
	case ':':
		s = s[1:]
		goto skip
	default:
		label += string(s[0])
	}
	s = s[1:]
	goto capture
skip:
	switch s[0] {
	case '{':
		level++
	case '}':
		level--
	}
	s = s[1:]

	if level == 0 {
		goto label
	}

	goto skip
label:
	if label != "" {
		output += "<" + label + ">"
		label = ""
	}
end:
	if s != "" {
		goto start
	}

	return output
}
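To make the parser's behavior concrete, two illustrative inputs and their outputs (the route strings are assumptions for demonstration, not taken from the actual API descriptor):

```go
// prettyGorillaMuxPath("/v2/{name:[a-zA-Z0-9/_-]+}/manifests/{reference}")
// => "/v2/<name>/manifests/<reference>"
//
// prettyGorillaMuxPath("/v2/{name}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}")
// => "/v2/<name>/blobs/uploads/<uuid>"
```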
58
cmd/registry/config-cache.yml
Normal file

@@ -0,0 +1,58 @@
version: 0.1
log:
  level: debug
  fields:
    service: registry
    environment: development
storage:
  cache:
    blobdescriptor: redis
  filesystem:
    rootdirectory: /var/lib/registry-cache
  maintenance:
    uploadpurging:
      enabled: false
  tag:
    concurrencylimit: 8
http:
  addr: :5000
  secret: asecretforlocaldevelopment
  debug:
    addr: localhost:5001
  headers:
    X-Content-Type-Options: [nosniff]
redis:
  addrs: [localhost:6379]
  maxidleconns: 16
  poolsize: 64
  connmaxidletime: 300s
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
notifications:
  events:
    includereferences: true
  endpoints:
    - name: local-8082
      url: http://localhost:5003/callback
      headers:
        Authorization: [Bearer <an example token>]
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
    - name: local-8083
      url: http://localhost:8083/callback
      timeout: 1s
      threshold: 10
      backoff: 1s
      disabled: true
proxy:
  remoteurl: https://registry-1.docker.io
  username: username
  password: password
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
50
cmd/registry/config-dev-frostfs.yml
Normal file

@@ -0,0 +1,50 @@
version: 0.1
log:
  level: debug
  fields:
    service: registry
    environment: development
storage:
  delete:
    enabled: true
  maintenance:
    uploadpurging:
      enabled: false
  frostfs:
    wallet:
      path: /path/to/wallet.json
      password: ""
    peers:
      0:
        address: s01.frostfs.devenv:8080
        weight: 1
        priority: 1
      1:
        address: s02.frostfs.devenv:8080
        weight: 1
        priority: 1
      2:
        address: s03.frostfs.devenv:8080
        weight: 1
        priority: 1
      3:
        address: s04.frostfs.devenv:8080
        weight: 1
        priority: 1
    # container can be a nicename (rpc_endpoint is required)
    container: ChzA3qeJHbAT2nyo35LofdJ7jMqVuT9h3WoRpxHRn9Uq
    # the following params are optional
    session_expiration_duration: 1000 # in blocks
    connection_timeout: 5s
    request_timeout: 5s
    rebalance_interval: 30s
    rpc_endpoint: http://morph-chain.frostfs.devenv:30333
http:
  addr: :5000
  headers:
    X-Content-Type-Options: [ nosniff ]
health:
  storagedriver:
    enabled: true
    interval: 30s
    threshold: 3
32
cmd/registry/config-dev.yml
Normal file

@@ -0,0 +1,32 @@
version: 0.1
log:
  level: debug
  fields:
    service: registry
    environment: development
storage:
  delete:
    enabled: true
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /var/lib/registry
  maintenance:
    uploadpurging:
      enabled: false
  tag:
    concurrencylimit: 8
http:
  addr: :5000
  debug:
    addr: :5001
    prometheus:
      enabled: true
      path: /metrics
  headers:
    X-Content-Type-Options: [nosniff]
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
24
cmd/registry/config-example.yml
Normal file

@@ -0,0 +1,24 @@
version: 0.1
log:
  fields:
    service: registry
storage:
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /var/lib/registry
  tag:
    concurrencylimit: 8
http:
  addr: :5000
  headers:
    X-Content-Type-Options: [nosniff]
auth:
  htpasswd:
    realm: basic-realm
    path: /etc/registry
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3
28
cmd/registry/main.go
Normal file

@@ -0,0 +1,28 @@
package main

import (
	_ "net/http/pprof"

	"github.com/distribution/distribution/v3/registry"
	_ "github.com/distribution/distribution/v3/registry/auth/htpasswd"
	_ "github.com/distribution/distribution/v3/registry/auth/silly"
	_ "github.com/distribution/distribution/v3/registry/auth/token"
	_ "github.com/distribution/distribution/v3/registry/proxy"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/azure"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/frostfs"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/gcs"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/cloudfront"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/redirect"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/rewrite"
	_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
)

func main() {
	// NOTE(milosgajdos): if the only two commands registered
	// with registry.RootCmd fail they will halt the program
	// execution and exit the program with non-zero exit code.
	// nolint:errcheck
	registry.RootCmd.Execute()
}
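The blank imports above work because each driver and auth package registers itself in an init function. A self-contained sketch of that idiom follows; the names here (Register, factoryFn, "examplefs") are illustrative only, not the actual factory API in registry/storage/driver/factory.

```go
package main

import "fmt"

// factoryFn constructs a driver from its configuration parameters.
type factoryFn func(params map[string]interface{}) (interface{}, error)

// drivers is the package-level registry that blank imports populate.
var drivers = map[string]factoryFn{}

// Register makes a driver selectable by name from configuration.
func Register(name string, fn factoryFn) { drivers[name] = fn }

// In a real driver package, this init runs as a side effect of the blank
// import, before main, so the driver is available without explicit wiring.
func init() {
	Register("examplefs", func(params map[string]interface{}) (interface{}, error) {
		return fmt.Sprintf("examplefs rooted at %v", params["rootdirectory"]), nil
	})
}

func main() {
	d, _ := drivers["examplefs"](map[string]interface{}{"rootdirectory": "/tmp"})
	fmt.Println(d) // examplefs rooted at /tmp
}
```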
885
configuration/configuration.go
Normal file

@@ -0,0 +1,885 @@
package configuration

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"reflect"
	"strings"
	"time"

	"github.com/redis/go-redis/v9"
)

// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
// optionally modified by environment variables.
//
// Note that yaml field names should never include _ characters, since this is the separator used
// in environment variable names.
type Configuration struct {
	// Version is the version which defines the format of the rest of the configuration
	Version Version `yaml:"version"`

	// Log supports setting various parameters related to the logging
	// subsystem.
	Log struct {
		// AccessLog configures access logging.
		AccessLog struct {
			// Disabled disables access logging.
			Disabled bool `yaml:"disabled,omitempty"`
		} `yaml:"accesslog,omitempty"`

		// Level is the granularity at which registry operations are logged.
		Level Loglevel `yaml:"level,omitempty"`

		// Formatter overrides the default formatter with another. Options
		// include "text", "json" and "logstash".
		Formatter string `yaml:"formatter,omitempty"`

		// Fields allows users to specify static string fields to include in
		// the logger context.
		Fields map[string]interface{} `yaml:"fields,omitempty"`

		// Hooks allows users to configure log hooks that trigger follow-up
		// handling when log messages are emitted at the configured levels.
		Hooks []LogHook `yaml:"hooks,omitempty"`

		// ReportCaller allows the user to configure the log to report the caller
		ReportCaller bool `yaml:"reportcaller,omitempty"`
	}

	// Loglevel is the level at which registry operations are logged.
	//
	// Deprecated: Use Log.Level instead.
	Loglevel Loglevel `yaml:"loglevel,omitempty"`

	// Storage is the configuration for the registry's storage driver
	Storage Storage `yaml:"storage"`

	// Auth allows configuration of various authorization methods that may be
	// used to gate requests.
	Auth Auth `yaml:"auth,omitempty"`

	// Middleware lists all middlewares to be used by the registry.
	Middleware map[string][]Middleware `yaml:"middleware,omitempty"`

	// HTTP contains configuration parameters for the registry's http
	// interface.
	HTTP struct {
		// Addr specifies the bind address for the registry instance.
		Addr string `yaml:"addr,omitempty"`

		// Net specifies the net portion of the bind address. A default empty value means tcp.
		Net string `yaml:"net,omitempty"`

		// Host specifies an externally-reachable address for the registry, as a fully
		// qualified URL.
		Host string `yaml:"host,omitempty"`

		Prefix string `yaml:"prefix,omitempty"`

		// Secret specifies the secret key which HMAC tokens are created with.
		Secret string `yaml:"secret,omitempty"`

		// RelativeURLs specifies that relative URLs should be returned in
		// Location headers
		RelativeURLs bool `yaml:"relativeurls,omitempty"`

		// DrainTimeout is the amount of time to wait for connections to drain
		// before shutting down when the registry receives a stop signal
		DrainTimeout time.Duration `yaml:"draintimeout,omitempty"`

		// TLS instructs the http server to listen with a TLS configuration.
		// This only supports simple TLS configuration with a cert and key.
		// Mostly, this is useful for testing situations or simple deployments
		// that require tls. If more complex configurations are required, use
		// a proxy or make a proposal to add support here.
		TLS struct {
			// Certificate specifies the path to an x509 certificate file to
			// be used for TLS.
			Certificate string `yaml:"certificate,omitempty"`

			// Key specifies the path to the x509 key file, which should
			// contain the private portion for the file specified in
			// Certificate.
			Key string `yaml:"key,omitempty"`

			// ClientCAs specifies the CA certs for client authentication.
			// A file may contain multiple CA certificates encoded as PEM.
			ClientCAs []string `yaml:"clientcas,omitempty"`

			// MinimumTLS specifies the lowest TLS version allowed
			MinimumTLS string `yaml:"minimumtls,omitempty"`

			// CipherSuites specifies a list of cipher suites allowed
			CipherSuites []string `yaml:"ciphersuites,omitempty"`

			// LetsEncrypt is used to configure TLS through
			// Let's Encrypt instead of manually specifying certificate and
			// key. If a TLS certificate is specified, the Let's Encrypt
			// section will not be used.
			LetsEncrypt struct {
				// CacheFile specifies the cache file to use for Let's Encrypt
				// certificates and keys.
				CacheFile string `yaml:"cachefile,omitempty"`

				// Email is the email to use during Let's Encrypt registration
				Email string `yaml:"email,omitempty"`

				// Hosts specifies the hosts which are allowed to obtain Let's
				// Encrypt certificates.
				Hosts []string `yaml:"hosts,omitempty"`

				// DirectoryURL points to the CA directory endpoint.
				// If empty, LetsEncrypt is used.
				DirectoryURL string `yaml:"directoryurl,omitempty"`
			} `yaml:"letsencrypt,omitempty"`
		} `yaml:"tls,omitempty"`

		// Headers is a set of headers to include in HTTP responses. A common
		// use case for this would be security headers such as
		// Strict-Transport-Security. The map keys are the header names, and
		// the values are the associated header payloads.
		Headers http.Header `yaml:"headers,omitempty"`

		// Debug configures the http debug interface, if specified. This can
		// include services such as pprof, expvar and other data that should
		// not be exposed externally. Left disabled by default.
		Debug struct {
			// Addr specifies the bind address for the debug server.
			Addr string `yaml:"addr,omitempty"`
			// Prometheus configures the Prometheus telemetry endpoint.
			Prometheus struct {
				Enabled bool   `yaml:"enabled,omitempty"`
				Path    string `yaml:"path,omitempty"`
			} `yaml:"prometheus,omitempty"`
		} `yaml:"debug,omitempty"`

		// HTTP2 configuration options
		HTTP2 struct {
			// Disabled specifies whether the registry should disallow clients
			// attempting to connect via HTTP/2. If set to true, only HTTP/1.1
			// is supported.
			Disabled bool `yaml:"disabled,omitempty"`
		} `yaml:"http2,omitempty"`

		H2C struct {
			// Enabled enables H2C (HTTP/2 Cleartext). Enable this to support
			// HTTP/2 without needing to configure TLS. Useful when deploying
			// the registry behind a load balancer (e.g. Cloud Run).
			Enabled bool `yaml:"enabled,omitempty"`
		} `yaml:"h2c,omitempty"`
	} `yaml:"http,omitempty"`

	// Notifications specifies configuration about various endpoints to which
	// registry events are dispatched.
	Notifications Notifications `yaml:"notifications,omitempty"`

	// Redis configures the redis pool available to the registry webapp.
	Redis Redis `yaml:"redis,omitempty"`

	Health  Health  `yaml:"health,omitempty"`
	Catalog Catalog `yaml:"catalog,omitempty"`

	Proxy Proxy `yaml:"proxy,omitempty"`

	// Validation configures validation options for the registry.
	Validation Validation `yaml:"validation,omitempty"`

	// Policy configures registry policy options.
	Policy struct {
		// Repository configures policies for repositories
		Repository struct {
			// Classes is a list of repository classes which the
			// registry allows content for. This class is matched
			// against the configuration media type inside uploaded
			// manifests. When non-empty, the registry will enforce
			// the class in authorized resources.
			Classes []string `yaml:"classes"`
		} `yaml:"repository,omitempty"`
	} `yaml:"policy,omitempty"`
}

// Catalog provides the configuration for the catalog endpoint (/v2/_catalog),
// controlling the maximum number of entries it returns.
type Catalog struct {
	// MaxEntries is the maximum number of entries returned by the catalog
	// endpoint. Requesting n entries will return at most MaxEntries entries.
	// An empty or negative value defaults to 1000 maximum entries.
	MaxEntries int `yaml:"maxentries,omitempty"`
}

// LogHook is composed of the hook Level and Type.
// Once configured, a hook executes its handler automatically when a log
// message is emitted at one of the configured levels.
// Example: a hook can send an email notification when an error is logged.
type LogHook struct {
	// Disabled lets the user enable or disable the hook.
	Disabled bool `yaml:"disabled,omitempty"`

	// Type allows the user to select which type of hook handler they want.
	Type string `yaml:"type,omitempty"`

	// Levels sets the log levels that trigger the hook.
	Levels []string `yaml:"levels,omitempty"`

	// MailOptions allows the user to configure email parameters.
	MailOptions MailOptions `yaml:"options,omitempty"`
}

// MailOptions provides the configuration for the mail handler.
type MailOptions struct {
	SMTP struct {
		// Addr defines the smtp host address
		Addr string `yaml:"addr,omitempty"`

		// Username defines the user name for the smtp host
		Username string `yaml:"username,omitempty"`

		// Password defines the password of the login user
		Password string `yaml:"password,omitempty"`

		// Insecure defines if the smtp login skips certificate verification.
		Insecure bool `yaml:"insecure,omitempty"`
	} `yaml:"smtp,omitempty"`

	// From defines the mail sending address
	From string `yaml:"from,omitempty"`

	// To defines the mail receiving addresses
	To []string `yaml:"to,omitempty"`
}

// FileChecker is a type of entry in the health section for checking files.
type FileChecker struct {
	// Interval is the duration in between checks
	Interval time.Duration `yaml:"interval,omitempty"`
	// File is the path to check
	File string `yaml:"file,omitempty"`
	// Threshold is the number of times a check must fail to trigger an
	// unhealthy state
	Threshold int `yaml:"threshold,omitempty"`
}

// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
type HTTPChecker struct {
	// Timeout is the duration to wait before timing out the HTTP request
	Timeout time.Duration `yaml:"timeout,omitempty"`
	// StatusCode is the expected status code
	StatusCode int
	// Interval is the duration in between checks
	Interval time.Duration `yaml:"interval,omitempty"`
	// URI is the HTTP URI to check
	URI string `yaml:"uri,omitempty"`
	// Headers lists static headers that should be added to all requests
	Headers http.Header `yaml:"headers"`
	// Threshold is the number of times a check must fail to trigger an
	// unhealthy state
	Threshold int `yaml:"threshold,omitempty"`
}

// TCPChecker is a type of entry in the health section for checking TCP servers.
type TCPChecker struct {
	// Timeout is the duration to wait before timing out the TCP connection
	Timeout time.Duration `yaml:"timeout,omitempty"`
	// Interval is the duration in between checks
	Interval time.Duration `yaml:"interval,omitempty"`
	// Addr is the TCP address to check
	Addr string `yaml:"addr,omitempty"`
	// Threshold is the number of times a check must fail to trigger an
	// unhealthy state
	Threshold int `yaml:"threshold,omitempty"`
}

// Health provides the configuration section for health checks.
type Health struct {
	// FileCheckers is a list of paths to check
	FileCheckers []FileChecker `yaml:"file,omitempty"`
	// HTTPCheckers is a list of URIs to check
	HTTPCheckers []HTTPChecker `yaml:"http,omitempty"`
	// TCPCheckers is a list of addresses to check
	TCPCheckers []TCPChecker `yaml:"tcp,omitempty"`
	// StorageDriver configures a health check on the configured storage
	// driver
	StorageDriver struct {
		// Enabled turns on the health check for the storage driver
		Enabled bool `yaml:"enabled,omitempty"`
		// Interval is the duration in between checks
		Interval time.Duration `yaml:"interval,omitempty"`
		// Threshold is the number of times a check must fail to trigger an
		// unhealthy state
		Threshold int `yaml:"threshold,omitempty"`
	} `yaml:"storagedriver,omitempty"`
}

type Platform struct {
	// Architecture is the architecture for this platform
	Architecture string `yaml:"architecture,omitempty"`
	// OS is the operating system for this platform
	OS string `yaml:"os,omitempty"`
}

// v0_1Configuration is a Version 0.1 Configuration struct
// This is currently aliased to Configuration, as it is the current version
type v0_1Configuration Configuration

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers
func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var versionString string
	err := unmarshal(&versionString)
	if err != nil {
		return err
	}

	newVersion := Version(versionString)
	if _, err := newVersion.major(); err != nil {
		return err
	}

	if _, err := newVersion.minor(); err != nil {
		return err
	}

	*version = newVersion
	return nil
}

// CurrentVersion is the most recent Version that can be parsed
var CurrentVersion = MajorMinorVersion(0, 1)

// Loglevel is the level at which operations are logged
// This can be error, warn, info, or debug
type Loglevel string

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
// valid loglevel
func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var loglevelString string
	err := unmarshal(&loglevelString)
	if err != nil {
		return err
	}

	loglevelString = strings.ToLower(loglevelString)
	switch loglevelString {
	case "error", "warn", "info", "debug":
	default:
		return fmt.Errorf("invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
	}

	*loglevel = Loglevel(loglevelString)
	return nil
}

// Parameters defines a key-value parameters mapping
type Parameters map[string]interface{}

// Storage defines the configuration for registry object storage
type Storage map[string]Parameters

// Type returns the storage driver type, such as filesystem or s3
func (storage Storage) Type() string {
	var storageType []string

	// Return only key in this map
	for k := range storage {
		switch k {
		case "maintenance":
			// allow configuration of maintenance
		case "cache":
			// allow configuration of caching
		case "delete":
			// allow configuration of delete
		case "redirect":
			// allow configuration of redirect
		case "tag":
			// allow configuration of tag
		default:
			storageType = append(storageType, k)
		}
	}
	if len(storageType) > 1 {
		panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", "))
	}
	if len(storageType) == 1 {
		return storageType[0]
	}
	return ""
}

// TagParameters returns the Parameters map for a Storage tag configuration
func (storage Storage) TagParameters() Parameters {
	return storage["tag"]
}

// setTagParameter changes the parameter at the provided key to the new value
func (storage Storage) setTagParameter(key string, value interface{}) {
	if _, ok := storage["tag"]; !ok {
		storage["tag"] = make(Parameters)
	}
	storage["tag"][key] = value
}

// Parameters returns the Parameters map for a Storage configuration
func (storage Storage) Parameters() Parameters {
	return storage[storage.Type()]
}

// setParameter changes the parameter at the provided key to the new value
func (storage Storage) setParameter(key string, value interface{}) {
	storage[storage.Type()][key] = value
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var storageMap map[string]Parameters
	err := unmarshal(&storageMap)
	if err == nil {
		if len(storageMap) > 1 {
			types := make([]string, 0, len(storageMap))
			for k := range storageMap {
				switch k {
				case "maintenance":
					// allow for configuration of maintenance
				case "cache":
					// allow configuration of caching
				case "delete":
					// allow configuration of delete
				case "redirect":
					// allow configuration of redirect
				case "tag":
					// allow configuration of tag
				default:
					types = append(types, k)
				}
			}

			if len(types) > 1 {
				return fmt.Errorf("must provide exactly one storage type. Provided: %v", types)
			}
		}
		*storage = storageMap
		return nil
	}

	var storageType string
	err = unmarshal(&storageType)
	if err == nil {
		*storage = Storage{storageType: Parameters{}}
		return nil
	}

	return err
}

// MarshalYAML implements the yaml.Marshaler interface
func (storage Storage) MarshalYAML() (interface{}, error) {
	if storage.Parameters() == nil {
		return storage.Type(), nil
	}
	return map[string]Parameters(storage), nil
}
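A short sketch of the two YAML shapes Storage.UnmarshalYAML accepts, using this package plus gopkg.in/yaml.v2 (already a test dependency of this package); the driver names and parameters are illustrative.

```go
package main

import (
	"fmt"

	"github.com/distribution/distribution/v3/configuration"
	"gopkg.in/yaml.v2"
)

func main() {
	var withParams, bare configuration.Storage

	// Map form: exactly one driver key; cache/delete/maintenance/redirect/tag
	// may sit alongside it without counting as a second driver.
	doc := []byte("filesystem:\n  rootdirectory: /var/lib/registry\ncache:\n  blobdescriptor: inmemory\n")
	if err := yaml.Unmarshal(doc, &withParams); err != nil {
		panic(err)
	}
	fmt.Println(withParams.Type(), withParams.Parameters()["rootdirectory"]) // filesystem /var/lib/registry

	// Bare-string form: a driver name with no parameters.
	if err := yaml.Unmarshal([]byte("inmemory"), &bare); err != nil {
		panic(err)
	}
	fmt.Println(bare.Type()) // inmemory
}
```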
// Auth defines the configuration for registry authorization.
type Auth map[string]Parameters

// Type returns the auth type, such as htpasswd or token
func (auth Auth) Type() string {
	// Return only key in this map
	for k := range auth {
		return k
	}
	return ""
}

// Parameters returns the Parameters map for an Auth configuration
func (auth Auth) Parameters() Parameters {
	return auth[auth.Type()]
}

// setParameter changes the parameter at the provided key to the new value
func (auth Auth) setParameter(key string, value interface{}) {
	auth[auth.Type()][key] = value
}

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var m map[string]Parameters
	err := unmarshal(&m)
	if err == nil {
		if len(m) > 1 {
			types := make([]string, 0, len(m))
			for k := range m {
				types = append(types, k)
			}

			// TODO(stevvooe): May want to change this slightly for
			// authorization to allow multiple challenges.
			return fmt.Errorf("must provide exactly one type. Provided: %v", types)
		}
		*auth = m
		return nil
	}

	var authType string
	err = unmarshal(&authType)
	if err == nil {
		*auth = Auth{authType: Parameters{}}
		return nil
	}

	return err
}

// MarshalYAML implements the yaml.Marshaler interface
func (auth Auth) MarshalYAML() (interface{}, error) {
	if auth.Parameters() == nil {
		return auth.Type(), nil
	}
	return map[string]Parameters(auth), nil
}

// Notifications configures multiple http endpoints.
type Notifications struct {
	// EventConfig is the configuration for the event format that is sent to each Endpoint.
	EventConfig Events `yaml:"events,omitempty"`
	// Endpoints is a list of http configurations for endpoints that
	// respond to webhook notifications. In the future, we may allow other
	// kinds of endpoints, such as external queues.
	Endpoints []Endpoint `yaml:"endpoints,omitempty"`
}

// Endpoint describes the configuration of an http webhook notification
// endpoint.
type Endpoint struct {
	Name              string        `yaml:"name"`              // identifies the endpoint in the registry instance.
	Disabled          bool          `yaml:"disabled"`          // disables the endpoint
	URL               string        `yaml:"url"`               // post url for the endpoint.
	Headers           http.Header   `yaml:"headers"`           // static headers that should be added to all requests
	Timeout           time.Duration `yaml:"timeout"`           // HTTP timeout
	Threshold         int           `yaml:"threshold"`         // circuit breaker threshold before backing off on failure
	Backoff           time.Duration `yaml:"backoff"`           // backoff duration
	IgnoredMediaTypes []string      `yaml:"ignoredmediatypes"` // target media types to ignore
	Ignore            Ignore        `yaml:"ignore"`            // ignore event types
}

// Events configures notification events.
type Events struct {
	IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
}

// Ignore configures media types and actions of events that should not be propagated.
type Ignore struct {
	MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
	Actions    []string `yaml:"actions"`    // ignore action types
}

// Middleware configures named middlewares to be applied at injection points.
type Middleware struct {
	// Name the middleware registers itself as
	Name string `yaml:"name"`
	// Disabled is a flag to disable the middleware easily
	Disabled bool `yaml:"disabled,omitempty"`
	// Options is a map of parameters that will be passed to the middleware's initialization function
	Options Parameters `yaml:"options"`
}

// Proxy configures the registry as a pull through cache
type Proxy struct {
	// RemoteURL is the URL of the remote registry
	RemoteURL string `yaml:"remoteurl"`

	// Username of the hub user
	Username string `yaml:"username"`

	// Password of the hub user
	Password string `yaml:"password"`

	// TTL is the expiry time of the content; the content is cleaned up when it expires.
	// If not set, it defaults to 7 * 24 hours.
	// If set to zero, the cache will never expire.
	TTL *time.Duration `yaml:"ttl,omitempty"`
}

type Validation struct {
	// Enabled enables the other options in this section. This field is
	// deprecated in favor of Disabled.
	Enabled bool `yaml:"enabled,omitempty"`
	// Disabled disables the other options in this section.
	Disabled bool `yaml:"disabled,omitempty"`
	// Manifests configures manifest validation.
	Manifests ValidationManifests `yaml:"manifests,omitempty"`
}

type ValidationManifests struct {
	// URLs configures validation for URLs in pushed manifests.
	URLs struct {
		// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
		// that URLs in pushed manifests must match.
		Allow []string `yaml:"allow,omitempty"`
		// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
		// that URLs in pushed manifests must not match.
		Deny []string `yaml:"deny,omitempty"`
	} `yaml:"urls,omitempty"`
	// Indexes configures validation of image indexes
	Indexes ValidationIndexes `yaml:"indexes,omitempty"`
}

type ValidationIndexes struct {
	// Platforms configures which platform images included in an image index are validated
	Platforms Platforms `yaml:"platforms"`
	// PlatformList filters the set of platforms to validate for image existence.
	PlatformList []Platform `yaml:"platformlist,omitempty"`
}

// Platforms configures which platform images included in an image index are validated.
// This can be all, none, or list.
type Platforms string

// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Platforms option, lowercasing the string and validating that it represents a
// valid option
func (platforms *Platforms) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var platformsString string
	err := unmarshal(&platformsString)
	if err != nil {
		return err
	}

	platformsString = strings.ToLower(platformsString)
	switch platformsString {
	case "all", "none", "list":
	default:
		return fmt.Errorf("invalid platforms option %s Must be one of [all, none, list]", platformsString)
	}

	*platforms = Platforms(platformsString)
	return nil
}

// Parse parses an input configuration yaml document into a Configuration struct
// This should generally be capable of handling old configuration format versions
//
// Environment variables may be used to override configuration parameters other than version,
// following the scheme below:
// Configuration.Abc may be replaced by the value of REGISTRY_ABC,
// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth
func Parse(rd io.Reader) (*Configuration, error) {
	in, err := io.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	p := NewParser("registry", []VersionedParseInfo{
		{
			Version: MajorMinorVersion(0, 1),
			ParseAs: reflect.TypeOf(v0_1Configuration{}),
			ConversionFunc: func(c interface{}) (interface{}, error) {
				if v0_1, ok := c.(*v0_1Configuration); ok {
					if v0_1.Log.Level == Loglevel("") {
						if v0_1.Loglevel != Loglevel("") {
							v0_1.Log.Level = v0_1.Loglevel
						} else {
							v0_1.Log.Level = Loglevel("info")
						}
					}
					if v0_1.Loglevel != Loglevel("") {
						v0_1.Loglevel = Loglevel("")
					}

					if v0_1.Catalog.MaxEntries <= 0 {
						v0_1.Catalog.MaxEntries = 1000
					}

					if v0_1.Storage.Type() == "" {
						return nil, errors.New("no storage configuration provided")
					}
					return (*Configuration)(v0_1), nil
				}
				return nil, fmt.Errorf("expected *v0_1Configuration, received %#v", c)
			},
		},
	})

	config := new(Configuration)
	err = p.Parse(in, config)
	if err != nil {
		return nil, err
	}

	return config, nil
}

type RedisOptions = redis.UniversalOptions

type RedisTLSOptions struct {
	Certificate string   `yaml:"certificate,omitempty"`
	Key         string   `yaml:"key,omitempty"`
	ClientCAs   []string `yaml:"clientcas,omitempty"`
}

type Redis struct {
	Options RedisOptions    `yaml:",inline"`
	TLS     RedisTLSOptions `yaml:"tls,omitempty"`
}

func (c Redis) MarshalYAML() (interface{}, error) {
	fields := make(map[string]interface{})

	val := reflect.ValueOf(c.Options)
	typ := val.Type()

	for i := 0; i < val.NumField(); i++ {
		field := typ.Field(i)
		fieldValue := val.Field(i)

		// ignore funcs fields in redis.UniversalOptions
		if fieldValue.Kind() == reflect.Func {
			continue
		}

		fields[strings.ToLower(field.Name)] = fieldValue.Interface()
	}

	// Add TLS fields if they're not empty
	if c.TLS.Certificate != "" || c.TLS.Key != "" || len(c.TLS.ClientCAs) > 0 {
		fields["tls"] = c.TLS
	}

	return fields, nil
}

func (c *Redis) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var fields map[string]interface{}
	err := unmarshal(&fields)
	if err != nil {
		return err
	}

	val := reflect.ValueOf(&c.Options).Elem()
	typ := val.Type()

	for i := 0; i < typ.NumField(); i++ {
		field := typ.Field(i)
		fieldName := strings.ToLower(field.Name)

		if value, ok := fields[fieldName]; ok {
			fieldValue := val.Field(i)
			if fieldValue.CanSet() {
				switch field.Type {
				case reflect.TypeOf(time.Duration(0)):
					durationStr, ok := value.(string)
					if !ok {
						return fmt.Errorf("invalid duration value for field: %s", fieldName)
					}
					duration, err := time.ParseDuration(durationStr)
					if err != nil {
						return fmt.Errorf("failed to parse duration for field: %s, error: %v", fieldName, err)
					}
					fieldValue.Set(reflect.ValueOf(duration))
				default:
					if err := setFieldValue(fieldValue, value); err != nil {
						return fmt.Errorf("failed to set value for field: %s, error: %v", fieldName, err)
					}
				}
			}
		}
	}

	// Handle TLS fields
	if tlsData, ok := fields["tls"]; ok {
		tlsMap, ok := tlsData.(map[interface{}]interface{})
		if !ok {
			return fmt.Errorf("invalid TLS data structure")
		}

		if cert, ok := tlsMap["certificate"]; ok {
			var isString bool
			c.TLS.Certificate, isString = cert.(string)
			if !isString {
				return fmt.Errorf("Redis TLS certificate must be a string")
			}
		}
		if key, ok := tlsMap["key"]; ok {
			var isString bool
			c.TLS.Key, isString = key.(string)
			if !isString {
				return fmt.Errorf("Redis TLS (private) key must be a string")
			}
		}
		if cas, ok := tlsMap["clientcas"]; ok {
			caList, ok := cas.([]interface{})
			if !ok {
				return fmt.Errorf("invalid clientcas data structure")
			}
			for _, ca := range caList {
				if caStr, ok := ca.(string); ok {
					c.TLS.ClientCAs = append(c.TLS.ClientCAs, caStr)
				}
			}
		}
	}

	return nil
}

func setFieldValue(field reflect.Value, value interface{}) error {
	if value == nil {
		return nil
	}

	switch field.Kind() {
	case reflect.String:
		stringValue, ok := value.(string)
		if !ok {
			return fmt.Errorf("failed to convert value to string")
		}
		field.SetString(stringValue)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		intValue, ok := value.(int)
		if !ok {
			return fmt.Errorf("failed to convert value to integer")
		}
		field.SetInt(int64(intValue))
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		uintValue, ok := value.(uint)
		if !ok {
			return fmt.Errorf("failed to convert value to unsigned integer")
		}
		field.SetUint(uint64(uintValue))
	case reflect.Float32, reflect.Float64:
		floatValue, ok := value.(float64)
		if !ok {
			return fmt.Errorf("failed to convert value to float")
		}
		field.SetFloat(floatValue)
	case reflect.Bool:
		boolValue, ok := value.(bool)
		if !ok {
			return fmt.Errorf("failed to convert value to boolean")
		}
		field.SetBool(boolValue)
	case reflect.Slice:
		slice := reflect.MakeSlice(field.Type(), 0, 0)
		valueSlice, ok := value.([]interface{})
		if !ok {
			return fmt.Errorf("failed to convert value to slice")
		}
		for _, item := range valueSlice {
			sliceValue := reflect.New(field.Type().Elem()).Elem()
			if err := setFieldValue(sliceValue, item); err != nil {
				return err
			}
			slice = reflect.Append(slice, sliceValue)
		}
		field.Set(slice)
	default:
		return fmt.Errorf("unsupported field type: %v", field.Type())
	}
	return nil
}
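A minimal sketch of Parse in use, relying on the REGISTRY_* environment override scheme documented above; the yaml snippet and override value are illustrative.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/distribution/distribution/v3/configuration"
)

func main() {
	// Configuration.HTTP.Addr can be overridden by REGISTRY_HTTP_ADDR,
	// per the Parse documentation above.
	os.Setenv("REGISTRY_HTTP_ADDR", ":5001")

	yml := "version: 0.1\nstorage:\n  filesystem:\n    rootdirectory: /var/lib/registry\n"
	config, err := configuration.Parse(strings.NewReader(yml))
	if err != nil {
		panic(err)
	}
	fmt.Println(config.HTTP.Addr)      // :5001 (from the environment)
	fmt.Println(config.Storage.Type()) // filesystem
}
```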
607
configuration/configuration_test.go
Normal file

@@ -0,0 +1,607 @@
package configuration

import (
	"bytes"
	"net/http"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/suite"
	"gopkg.in/yaml.v2"
)

// configStruct is a canonical example configuration, which should map to configYamlV0_1
var configStruct = Configuration{
	Version: "0.1",
	Log: struct {
		AccessLog struct {
			Disabled bool `yaml:"disabled,omitempty"`
		} `yaml:"accesslog,omitempty"`
		Level        Loglevel               `yaml:"level,omitempty"`
		Formatter    string                 `yaml:"formatter,omitempty"`
		Fields       map[string]interface{} `yaml:"fields,omitempty"`
		Hooks        []LogHook              `yaml:"hooks,omitempty"`
		ReportCaller bool                   `yaml:"reportcaller,omitempty"`
	}{
		Level:  "info",
		Fields: map[string]interface{}{"environment": "test"},
	},
	Storage: Storage{
		"somedriver": Parameters{
			"string1": "string-value1",
			"string2": "string-value2",
			"bool1":   true,
			"bool2":   false,
			"nil1":    nil,
			"int1":    42,
			"url1":    "https://foo.example.com",
			"path1":   "/some-path",
		},
		"tag": Parameters{
			"concurrencylimit": 10,
		},
	},
	Auth: Auth{
		"silly": Parameters{
			"realm":   "silly",
			"service": "silly",
		},
	},
	Notifications: Notifications{
		Endpoints: []Endpoint{
			{
				Name: "endpoint-1",
				URL:  "http://example.com",
				Headers: http.Header{
					"Authorization": []string{"Bearer <example>"},
				},
				IgnoredMediaTypes: []string{"application/octet-stream"},
				Ignore: Ignore{
					MediaTypes: []string{"application/octet-stream"},
					Actions:    []string{"pull"},
				},
			},
		},
	},
	Catalog: Catalog{
		MaxEntries: 1000,
	},
	HTTP: struct {
		Addr         string        `yaml:"addr,omitempty"`
		Net          string        `yaml:"net,omitempty"`
		Host         string        `yaml:"host,omitempty"`
		Prefix       string        `yaml:"prefix,omitempty"`
		Secret       string        `yaml:"secret,omitempty"`
		RelativeURLs bool          `yaml:"relativeurls,omitempty"`
		DrainTimeout time.Duration `yaml:"draintimeout,omitempty"`
		TLS          struct {
			Certificate  string   `yaml:"certificate,omitempty"`
			Key          string   `yaml:"key,omitempty"`
			ClientCAs    []string `yaml:"clientcas,omitempty"`
			MinimumTLS   string   `yaml:"minimumtls,omitempty"`
			CipherSuites []string `yaml:"ciphersuites,omitempty"`
			LetsEncrypt  struct {
				CacheFile    string   `yaml:"cachefile,omitempty"`
				Email        string   `yaml:"email,omitempty"`
				Hosts        []string `yaml:"hosts,omitempty"`
				DirectoryURL string   `yaml:"directoryurl,omitempty"`
			} `yaml:"letsencrypt,omitempty"`
		} `yaml:"tls,omitempty"`
		Headers http.Header `yaml:"headers,omitempty"`
		Debug   struct {
			Addr       string `yaml:"addr,omitempty"`
			Prometheus struct {
				Enabled bool   `yaml:"enabled,omitempty"`
				Path    string `yaml:"path,omitempty"`
			} `yaml:"prometheus,omitempty"`
		} `yaml:"debug,omitempty"`
		HTTP2 struct {
			Disabled bool `yaml:"disabled,omitempty"`
		} `yaml:"http2,omitempty"`
		H2C struct {
			Enabled bool `yaml:"enabled,omitempty"`
		} `yaml:"h2c,omitempty"`
	}{
		TLS: struct {
			Certificate  string   `yaml:"certificate,omitempty"`
			Key          string   `yaml:"key,omitempty"`
			ClientCAs    []string `yaml:"clientcas,omitempty"`
			MinimumTLS   string   `yaml:"minimumtls,omitempty"`
			CipherSuites []string `yaml:"ciphersuites,omitempty"`
			LetsEncrypt  struct {
				CacheFile    string   `yaml:"cachefile,omitempty"`
				Email        string   `yaml:"email,omitempty"`
				Hosts        []string `yaml:"hosts,omitempty"`
				DirectoryURL string   `yaml:"directoryurl,omitempty"`
			} `yaml:"letsencrypt,omitempty"`
		}{
			ClientCAs: []string{"/path/to/ca.pem"},
		},
		Headers: http.Header{
			"X-Content-Type-Options": []string{"nosniff"},
		},
		HTTP2: struct {
			Disabled bool `yaml:"disabled,omitempty"`
		}{
			Disabled: false,
		},
		H2C: struct {
			Enabled bool `yaml:"enabled,omitempty"`
		}{
			Enabled: true,
		},
	},
	Redis: Redis{
		Options: redis.UniversalOptions{
			Addrs:           []string{"localhost:6379"},
			Username:        "alice",
			Password:        "123456",
			DB:              1,
			MaxIdleConns:    16,
			PoolSize:        64,
			ConnMaxIdleTime: time.Second * 300,
			DialTimeout:     time.Millisecond * 10,
			ReadTimeout:     time.Millisecond * 10,
			WriteTimeout:    time.Millisecond * 10,
		},
		TLS: RedisTLSOptions{
			Certificate: "/foo/cert.crt",
			Key:         "/foo/key.pem",
			ClientCAs:   []string{"/path/to/ca.pem"},
		},
	},
	Validation: Validation{
		Manifests: ValidationManifests{
			Indexes: ValidationIndexes{
				Platforms: "none",
			},
		},
	},
}

// configYamlV0_1 is a Version 0.1 yaml document representing configStruct
const configYamlV0_1 = `
version: 0.1
log:
  level: info
  fields:
    environment: test
storage:
  somedriver:
    string1: string-value1
    string2: string-value2
    bool1: true
    bool2: false
    nil1: ~
    int1: 42
    url1: "https://foo.example.com"
    path1: "/some-path"
  tag:
    concurrencylimit: 10
auth:
  silly:
    realm: silly
    service: silly
notifications:
  endpoints:
    - name: endpoint-1
      url: http://example.com
      headers:
        Authorization: [Bearer <example>]
      ignoredmediatypes:
        - application/octet-stream
      ignore:
        mediatypes:
          - application/octet-stream
        actions:
          - pull
http:
  tls:
    clientcas:
      - /path/to/ca.pem
  headers:
    X-Content-Type-Options: [nosniff]
redis:
  tls:
    certificate: /foo/cert.crt
    key: /foo/key.pem
    clientcas:
      - /path/to/ca.pem
  addrs: [localhost:6379]
  username: alice
  password: "123456"
  db: 1
  maxidleconns: 16
  poolsize: 64
  connmaxidletime: 300s
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
validation:
  manifests:
    indexes:
      platforms: none
`

// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
// storage driver with no parameters
const inmemoryConfigYamlV0_1 = `
version: 0.1
log:
  level: info
storage: inmemory
auth:
  silly:
    realm: silly
    service: silly
notifications:
  endpoints:
    - name: endpoint-1
      url: http://example.com
      headers:
        Authorization: [Bearer <example>]
      ignoredmediatypes:
        - application/octet-stream
      ignore:
        mediatypes:
          - application/octet-stream
        actions:
          - pull
http:
  headers:
    X-Content-Type-Options: [nosniff]
validation:
  manifests:
    indexes:
      platforms: none
`

type ConfigSuite struct {
	suite.Suite
	expectedConfig *Configuration
}

func TestConfigSuite(t *testing.T) {
	suite.Run(t, new(ConfigSuite))
}

func (suite *ConfigSuite) SetupTest() {
	suite.expectedConfig = copyConfig(configStruct)
}

// TestMarshalRoundtrip validates that configStruct can be marshaled and
// unmarshaled without changing any parameters
func (suite *ConfigSuite) TestMarshalRoundtrip() {
	configBytes, err := yaml.Marshal(suite.expectedConfig)
	suite.Require().NoError(err)
	config, err := Parse(bytes.NewReader(configBytes))
	suite.T().Log(string(configBytes))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseSimple validates that configYamlV0_1 can be parsed into a struct
// matching configStruct
func (suite *ConfigSuite) TestParseSimple() {
	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseInmemory validates that configuration yaml with storage provided as
// a string can be parsed into a Configuration struct with no storage parameters
func (suite *ConfigSuite) TestParseInmemory() {
	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
	suite.expectedConfig.Log.Fields = nil
	suite.expectedConfig.HTTP.TLS.ClientCAs = nil
	suite.expectedConfig.Redis = Redis{}

	config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseIncomplete validates that an incomplete yaml configuration cannot
// be parsed without providing environment variables to fill in the missing
// components.
func (suite *ConfigSuite) TestParseIncomplete() {
	incompleteConfigYaml := "version: 0.1"
	_, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
	suite.Require().Error(err)

	suite.expectedConfig.Log.Fields = nil
	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
	suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
	suite.expectedConfig.Notifications = Notifications{}
	suite.expectedConfig.HTTP.Headers = nil
	suite.expectedConfig.HTTP.TLS.ClientCAs = nil
	suite.expectedConfig.Redis = Redis{}
	suite.expectedConfig.Validation.Manifests.Indexes.Platforms = ""

	// Note: this also tests that REGISTRY_STORAGE and
	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together
	suite.T().Setenv("REGISTRY_STORAGE", "filesystem")
	suite.T().Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
	suite.T().Setenv("REGISTRY_AUTH", "silly")
	suite.T().Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")

	config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseWithSameEnvStorage validates that providing environment variables
// that match the given storage type will only include environment-defined
// parameters and remove yaml-defined parameters
func (suite *ConfigSuite) TestParseWithSameEnvStorage() {
	suite.expectedConfig.Storage = Storage{"somedriver": Parameters{"region": "us-east-1"}}

	suite.T().Setenv("REGISTRY_STORAGE", "somedriver")
	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_REGION", "us-east-1")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
// and add to the given storage parameters will change and add parameters to the parsed
// Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams() {
	suite.expectedConfig.Storage.setParameter("string1", "us-west-1")
	suite.expectedConfig.Storage.setParameter("bool1", true)
	suite.expectedConfig.Storage.setParameter("newparam", "some Value")

	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_STRING1", "us-west-1")
	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_BOOL1", "true")
	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_NEWPARAM", "some Value")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseWithDifferentEnvStorageType validates that providing an environment variable that
// changes the storage type will be reflected in the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType() {
	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}

	suite.T().Setenv("REGISTRY_STORAGE", "inmemory")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable
// that changes the storage type will be reflected in the parsed Configuration struct and that
// environment storage parameters will also be included
func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams() {
	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}}
	suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot")

	suite.T().Setenv("REGISTRY_STORAGE", "filesystem")
	suite.T().Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log
// level to the same as the one provided in the yaml will not change the parsed Configuration struct
func (suite *ConfigSuite) TestParseWithSameEnvLoglevel() {
	suite.T().Setenv("REGISTRY_LOGLEVEL", "info")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
// log level will override the value provided in the yaml document
func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel() {
	suite.expectedConfig.Log.Level = "error"

	suite.T().Setenv("REGISTRY_LOG_LEVEL", "error")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseInvalidLoglevel validates that the parser will fail to parse a
// configuration if the loglevel is malformed
func (suite *ConfigSuite) TestParseInvalidLoglevel() {
	invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory"
	_, err := Parse(bytes.NewReader([]byte(invalidConfigYaml)))
	suite.Require().Error(err)

	suite.T().Setenv("REGISTRY_LOGLEVEL", "derp")

	_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().Error(err)
}

// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration
// version than the CurrentVersion
func (suite *ConfigSuite) TestParseInvalidVersion() {
	suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1)
	configBytes, err := yaml.Marshal(suite.expectedConfig)
	suite.Require().NoError(err)
	_, err = Parse(bytes.NewReader(configBytes))
	suite.Require().Error(err)
}

// TestParseExtraneousVars validates that environment variables referring to
// nonexistent variables don't cause side effects.
func (suite *ConfigSuite) TestParseExtraneousVars() {
	// Environment variables which shouldn't set config items
	suite.T().Setenv("REGISTRY_DUCKS", "quack")
	suite.T().Setenv("REGISTRY_REPORTING_ASDF", "ghjk")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseEnvVarImplicitMaps validates that environment variables can set
// values in maps that don't already exist.
func (suite *ConfigSuite) TestParseEnvVarImplicitMaps() {
	readonly := make(map[string]interface{})
	readonly["enabled"] = true

	maintenance := make(map[string]interface{})
	maintenance["readonly"] = readonly

	suite.expectedConfig.Storage["maintenance"] = maintenance

	suite.T().Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true")

	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
	suite.Require().Equal(suite.expectedConfig, config)
}

// TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a
// string over existing map fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeMap() {
	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER", "somestring")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().Error(err)
}

// TestParseEnvWrongTypeStruct validates that incorrectly attempting to
// unmarshal a string into a struct fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeStruct() {
	suite.T().Setenv("REGISTRY_STORAGE_LOG", "somestring")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().Error(err)
}

// TestParseEnvWrongTypeSlice validates that incorrectly attempting to
// unmarshal a string into a slice fails.
func (suite *ConfigSuite) TestParseEnvWrongTypeSlice() {
	suite.T().Setenv("REGISTRY_LOG_HOOKS", "somestring")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().Error(err)
}

// TestParseEnvMany tests several environment variable overrides.
// The result is not checked - the goal of this test is to detect panics
// from misuse of reflection.
func (suite *ConfigSuite) TestParseEnvMany() {
	suite.T().Setenv("REGISTRY_VERSION", "0.1")
	suite.T().Setenv("REGISTRY_LOG_LEVEL", "debug")
	suite.T().Setenv("REGISTRY_LOG_FORMATTER", "json")
	suite.T().Setenv("REGISTRY_LOG_HOOKS", "json")
	suite.T().Setenv("REGISTRY_LOG_FIELDS", "abc: xyz")
	suite.T().Setenv("REGISTRY_LOG_HOOKS", "- type: asdf")
	suite.T().Setenv("REGISTRY_LOGLEVEL", "debug")
	suite.T().Setenv("REGISTRY_STORAGE", "somedriver")
	suite.T().Setenv("REGISTRY_AUTH_PARAMS", "param1: value1")
	suite.T().Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
	suite.T().Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")

	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
	suite.Require().NoError(err)
}

func checkStructs(tt *testing.T, t reflect.Type, structsChecked map[string]struct{}) {
	tt.Helper()

	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice {
		t = t.Elem()
	}

	if t.Kind() != reflect.Struct {
		return
	}
	if _, present := structsChecked[t.String()]; present {
		// Already checked this type
		return
	}

	structsChecked[t.String()] = struct{}{}

	byUpperCase := make(map[string]int)
	for i := 0; i < t.NumField(); i++ {
		sf := t.Field(i)

		// Check that the yaml tag does not contain an _.
		yamlTag := sf.Tag.Get("yaml")
		if strings.Contains(yamlTag, "_") {
			tt.Fatalf("yaml field name includes _ character: %s", yamlTag)
		}
		upper := strings.ToUpper(sf.Name)
		if _, present := byUpperCase[upper]; present {
			tt.Fatalf("field name collision in configuration object: %s", sf.Name)
		}
		byUpperCase[upper] = i

		checkStructs(tt, sf.Type, structsChecked)
	}
}

// TestValidateConfigStruct makes sure that the config struct has no members
// with yaml tags that would be ambiguous to the environment variable parser.
func (suite *ConfigSuite) TestValidateConfigStruct() {
	structsChecked := make(map[string]struct{})
	checkStructs(suite.T(), reflect.TypeOf(Configuration{}), structsChecked)
}

func copyConfig(config Configuration) *Configuration {
	configCopy := new(Configuration)

	configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor())
	configCopy.Loglevel = config.Loglevel
	configCopy.Log = config.Log
	configCopy.Catalog = config.Catalog
	configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields))
	for k, v := range config.Log.Fields {
		configCopy.Log.Fields[k] = v
	}

	configCopy.Storage = Storage{config.Storage.Type(): Parameters{}}
	for k, v := range config.Storage.Parameters() {
		configCopy.Storage.setParameter(k, v)
	}
	for k, v := range config.Storage.TagParameters() {
		configCopy.Storage.setTagParameter(k, v)
	}

	configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
	for k, v := range config.Auth.Parameters() {
		configCopy.Auth.setParameter(k, v)
	}

	configCopy.Notifications = Notifications{Endpoints: []Endpoint{}}
	configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, config.Notifications.Endpoints...)

	configCopy.HTTP.Headers = make(http.Header)
	for k, v := range config.HTTP.Headers {
		configCopy.HTTP.Headers[k] = v
	}
	configCopy.HTTP.TLS.ClientCAs = make([]string, 0, len(config.HTTP.TLS.ClientCAs))
	configCopy.HTTP.TLS.ClientCAs = append(configCopy.HTTP.TLS.ClientCAs, config.HTTP.TLS.ClientCAs...)

	configCopy.Redis = config.Redis
	configCopy.Redis.TLS.Certificate = config.Redis.TLS.Certificate
	configCopy.Redis.TLS.Key = config.Redis.TLS.Key
	configCopy.Redis.TLS.ClientCAs = make([]string, 0, len(config.Redis.TLS.ClientCAs))
	configCopy.Redis.TLS.ClientCAs = append(configCopy.Redis.TLS.ClientCAs, config.Redis.TLS.ClientCAs...)

	configCopy.Validation = Validation{
		Enabled:   config.Validation.Enabled,
		Disabled:  config.Validation.Disabled,
		Manifests: config.Validation.Manifests,
	}

	return configCopy
}
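The suite registers through `TestConfigSuite`, so it runs under plain `go test`; for instance (assuming the usual module layout):

```sh
go test ./configuration -run TestConfigSuite -v
```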
15
configuration/fuzz_test.go
Normal file
@ -0,0 +1,15 @@
package configuration

import (
	"bytes"
	"testing"
)

// FuzzConfigurationParse implements a fuzzer that targets Parse()
// nolint:deadcode
func FuzzConfigurationParse(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		rd := bytes.NewReader(data)
		_, _ = Parse(rd)
	})
}
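This hooks into Go's native fuzzing, so it can be exercised directly; the time budget below is an arbitrary choice:

```sh
go test ./configuration -fuzz FuzzConfigurationParse -fuzztime 30s
```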
302
configuration/parser.go
Normal file
@ -0,0 +1,302 @@
package configuration

import (
	"fmt"
	"os"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

// Version is a major/minor version pair of the form Major.Minor
// Major version upgrades indicate structure or type changes
// Minor version upgrades should be strictly additive
type Version string

// MajorMinorVersion constructs a Version from its Major and Minor components
func MajorMinorVersion(major, minor uint) Version {
	return Version(fmt.Sprintf("%d.%d", major, minor))
}

func (version Version) major() (uint, error) {
	majorPart, _, _ := strings.Cut(string(version), ".")
	major, err := strconv.ParseUint(majorPart, 10, 0)
	return uint(major), err
}

// Major returns the major version portion of a Version
func (version Version) Major() uint {
	major, _ := version.major()
	return major
}

func (version Version) minor() (uint, error) {
	_, minorPart, _ := strings.Cut(string(version), ".")
	minor, err := strconv.ParseUint(minorPart, 10, 0)
	return uint(minor), err
}

// Minor returns the minor version portion of a Version
func (version Version) Minor() uint {
	minor, _ := version.minor()
	return minor
}

// VersionedParseInfo defines how a specific version of a configuration should
// be parsed into the current version
type VersionedParseInfo struct {
	// Version is the version which this parsing information relates to
	Version Version
	// ParseAs defines the type which a configuration file of this version
	// should be parsed into
	ParseAs reflect.Type
	// ConversionFunc defines a method for converting the parsed configuration
	// (of type ParseAs) into the current configuration version
	// Note: this method signature is very unclear with the absence of generics
	ConversionFunc func(interface{}) (interface{}, error)
}

type envVar struct {
	name  string
	value string
}

type envVars []envVar

func (a envVars) Len() int           { return len(a) }
func (a envVars) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name }

// Parser can be used to parse a configuration file and environment of a defined
// version into a unified output structure
type Parser struct {
	prefix  string
	mapping map[Version]VersionedParseInfo
	env     envVars
}

// NewParser returns a *Parser with the given environment prefix which handles
// versioned configurations which match the given parseInfos
func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
	p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)}

	for _, parseInfo := range parseInfos {
		p.mapping[parseInfo.Version] = parseInfo
	}

	for _, env := range os.Environ() {
		k, v, _ := strings.Cut(env, "=")
		p.env = append(p.env, envVar{k, v})
	}

	// We must sort the environment variables lexically by name so that
	// less specific variables are applied before more specific ones
	// (i.e. REGISTRY_STORAGE before
	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a
	// lot simpler and easier to get right than unmarshalling map entries
	// into temporaries and merging with the existing entry.
	sort.Sort(p.env)

	return &p
}

// Parse reads in the given []byte and environment and writes the resulting
// configuration into the input v
//
// Environment variables may be used to override configuration parameters other
// than version, following the scheme below:
// v.Abc may be replaced by the value of PREFIX_ABC,
// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth
func (p *Parser) Parse(in []byte, v interface{}) error {
	var versionedStruct struct {
		Version Version
	}

	if err := yaml.Unmarshal(in, &versionedStruct); err != nil {
		return err
	}

	parseInfo, ok := p.mapping[versionedStruct.Version]
	if !ok {
		return fmt.Errorf("unsupported version: %q", versionedStruct.Version)
	}

	parseAs := reflect.New(parseInfo.ParseAs)
	err := yaml.Unmarshal(in, parseAs.Interface())
	if err != nil {
		return err
	}

	for _, envVar := range p.env {
		pathStr := envVar.name
		if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") {
			path := strings.Split(pathStr, "_")

			err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value)
			if err != nil {
				return fmt.Errorf("parsing environment variable %s: %v", pathStr, err)
			}
		}
	}

	c, err := parseInfo.ConversionFunc(parseAs.Interface())
	if err != nil {
		return err
	}
	reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c)))
	return nil
}

// overwriteFields replaces configuration values with alternate values specified
// through the environment. Precondition: an empty path slice must never be
// passed in.
func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error {
	for v.Kind() == reflect.Ptr {
		if v.IsNil() {
			panic("encountered nil pointer while handling environment variable " + fullpath)
		}
		v = reflect.Indirect(v)
	}
	switch v.Kind() {
	case reflect.Struct:
		return p.overwriteStruct(v, fullpath, path, payload)
	case reflect.Map:
		return p.overwriteMap(v, fullpath, path, payload)
	case reflect.Slice:
		idx, err := strconv.Atoi(path[0])
		if err != nil {
			panic("non-numeric index: " + path[0])
		}

		if idx > v.Len() {
			panic("undefined index: " + path[0])
		}

		// if there is no element or the current slice length
		// is the same as the indexed variable create a new element,
		// append it and then set it to the passed in env var value.
		if v.Len() == 0 || idx == v.Len() {
			typ := v.Type().Elem()
			elem := reflect.New(typ).Elem()
			v.Set(reflect.Append(v, elem))
		}
		return p.overwriteFields(v.Index(idx), fullpath, path[1:], payload)
	case reflect.Interface:
		if v.NumMethod() == 0 {
			if !v.IsNil() {
				return p.overwriteFields(v.Elem(), fullpath, path, payload)
			}
			// Interface was empty; create an implicit map
			var template map[string]interface{}
			wrappedV := reflect.MakeMap(reflect.TypeOf(template))
			v.Set(wrappedV)
			return p.overwriteMap(wrappedV, fullpath, path, payload)
		}
	}
	return nil
}

func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error {
	// Generate case-insensitive map of struct fields
	byUpperCase := make(map[string]int)
	for i := 0; i < v.NumField(); i++ {
		sf := v.Type().Field(i)
		upper := strings.ToUpper(sf.Name)
		if _, present := byUpperCase[upper]; present {
			panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name))
		}
		byUpperCase[upper] = i
	}

	fieldIndex, present := byUpperCase[path[0]]
	if !present {
		logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath)
		return nil
	}
	field := v.Field(fieldIndex)
	sf := v.Type().Field(fieldIndex)

	if len(path) == 1 {
		// Env var specifies this field directly
		fieldVal := reflect.New(sf.Type)
		err := yaml.Unmarshal([]byte(payload), fieldVal.Interface())
		if err != nil {
			return err
		}
		field.Set(reflect.Indirect(fieldVal))
		return nil
	}

	// If the field is nil, must create an object
	switch sf.Type.Kind() {
	case reflect.Map:
		if field.IsNil() {
			field.Set(reflect.MakeMap(sf.Type))
		}
	case reflect.Ptr:
		if field.IsNil() {
			field.Set(reflect.New(field.Type().Elem()))
		}
	}

	err := p.overwriteFields(field, fullpath, path[1:], payload)
	if err != nil {
		return err
	}

	return nil
}

func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error {
	if m.Type().Key().Kind() != reflect.String {
		// non-string keys unsupported
		logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath)
		return nil
	}

	if len(path) > 1 {
		// If a matching key exists, get its value and continue the
		// overwriting process.
		for _, k := range m.MapKeys() {
			if strings.ToUpper(k.String()) == path[0] {
				mapValue := m.MapIndex(k)
				// If the existing value is nil, we want to
				// recreate it instead of using this value.
				if (mapValue.Kind() == reflect.Ptr ||
					mapValue.Kind() == reflect.Interface ||
					mapValue.Kind() == reflect.Map) &&
					mapValue.IsNil() {
					break
				}
				return p.overwriteFields(mapValue, fullpath, path[1:], payload)
			}
		}
	}

	// (Re)create this key
	var mapValue reflect.Value
	if m.Type().Elem().Kind() == reflect.Map {
		mapValue = reflect.MakeMap(m.Type().Elem())
	} else {
		mapValue = reflect.New(m.Type().Elem())
	}
	if len(path) > 1 {
		err := p.overwriteFields(mapValue, fullpath, path[1:], payload)
		if err != nil {
			return err
		}
	} else {
		err := yaml.Unmarshal([]byte(payload), mapValue.Interface())
		if err != nil {
			return err
		}
	}

	m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue))

	return nil
}
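Applied to the registry's `REGISTRY_` prefix, the scheme in the Parse doc comment maps nested configuration fields to underscore-separated variable names; a sketch with illustrative values:

```sh
# v.HTTP.Addr  <- REGISTRY_HTTP_ADDR
# v.Log.Level  <- REGISTRY_LOG_LEVEL
# map keys are addressed the same way, e.g. storage driver parameters:
REGISTRY_HTTP_ADDR=:5001 \
REGISTRY_LOG_LEVEL=debug \
REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/var/lib/registry \
registry serve config.yml
```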
95
configuration/parser_test.go
Normal file
@ -0,0 +1,95 @@
package configuration

import (
	"reflect"
	"testing"

	"github.com/stretchr/testify/require"
)

type localConfiguration struct {
	Version       Version `yaml:"version"`
	Log           *Log    `yaml:"log"`
	Notifications []Notif `yaml:"notifications,omitempty"`
}

type Log struct {
	Formatter string `yaml:"formatter,omitempty"`
}

type Notif struct {
	Name string `yaml:"name"`
}

var expectedConfig = localConfiguration{
	Version: "0.1",
	Log: &Log{
		Formatter: "json",
	},
	Notifications: []Notif{
		{Name: "foo"},
		{Name: "bar"},
		{Name: "car"},
	},
}

const testConfig = `version: "0.1"
log:
  formatter: "text"
notifications:
  - name: "foo"
  - name: "bar"
  - name: "car"`

func TestParserOverwriteInitializedPointer(t *testing.T) {
	config := localConfiguration{}

	t.Setenv("REGISTRY_LOG_FORMATTER", "json")

	p := NewParser("registry", []VersionedParseInfo{
		{
			Version: "0.1",
			ParseAs: reflect.TypeOf(config),
			ConversionFunc: func(c interface{}) (interface{}, error) {
				return c, nil
			},
		},
	})

	err := p.Parse([]byte(testConfig), &config)
	require.NoError(t, err)
	require.Equal(t, expectedConfig, config)
}

const testConfig2 = `version: "0.1"
log:
  formatter: "text"
notifications:
  - name: "val1"
  - name: "val2"
  - name: "car"`

func TestParseOverwriteUninitializedPointer(t *testing.T) {
	config := localConfiguration{}

	t.Setenv("REGISTRY_LOG_FORMATTER", "json")

	// Override only the first two notifications values in testConfig2;
	// leave the last value unchanged.
	t.Setenv("REGISTRY_NOTIFICATIONS_0_NAME", "foo")
	t.Setenv("REGISTRY_NOTIFICATIONS_1_NAME", "bar")

	p := NewParser("registry", []VersionedParseInfo{
		{
			Version: "0.1",
			ParseAs: reflect.TypeOf(config),
			ConversionFunc: func(c interface{}) (interface{}, error) {
				return c, nil
			},
		},
	})

	err := p.Parse([]byte(testConfig2), &config)
	require.NoError(t, err)
	require.Equal(t, expectedConfig, config)
}
51
digestset/deprecated.go
Normal file
@ -0,0 +1,51 @@
package digestset

import (
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/go-digest/digestset"
)

// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
//
// Deprecated: use [digestset.ErrDigestNotFound].
var ErrDigestNotFound = digestset.ErrDigestNotFound

// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
//
// Deprecated: use [digestset.ErrDigestAmbiguous].
var ErrDigestAmbiguous = digestset.ErrDigestAmbiguous

// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
//
// Deprecated: use [digestset.Set].
type Set = digestset.Set

// NewSet creates an empty set of digests
// which may have digests added.
//
// Deprecated: use [digestset.NewSet].
func NewSet() *digestset.Set {
	return digestset.NewSet()
}

// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
//
// Deprecated: use [digestset.ShortCodeTable].
func ShortCodeTable(dst *digestset.Set, length int) map[digest.Digest]string {
	return digestset.ShortCodeTable(dst, length)
}
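Every symbol in this file is a thin alias, so migrating a caller is a one-line import swap; a sketch (the old import path shown assumes the upstream distribution module layout, adjust for this fork):

```go
import (
	// before: digestset "github.com/distribution/distribution/v3/digestset"
	"github.com/opencontainers/go-digest/digestset"
)

// Call sites keep the same shape either way:
var _ = digestset.NewSet()
```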
1
distribution-logo.svg
Normal file
File diff suppressed because one or more lines are too long (SVG logo image, 8.6 KiB)
7
doc.go
Normal file
@ -0,0 +1,7 @@
// Package distribution will define the interfaces for the components of
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to container images.
//
// This is currently a work in progress. More details are available in the
// README.md.
package distribution
127
docker-bake.hcl
Normal file
@ -0,0 +1,127 @@
group "default" {
  targets = ["image-local"]
}

group "validate" {
  targets = ["lint", "validate-git", "validate-vendor"]
}

target "lint" {
  dockerfile = "./dockerfiles/lint.Dockerfile"
  output = ["type=cacheonly"]
}

variable "COMMIT_RANGE" {
  default = ""
}
target "validate-git" {
  dockerfile = "./dockerfiles/git.Dockerfile"
  target = "validate"
  args = {
    COMMIT_RANGE = COMMIT_RANGE
    BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
  }
  output = ["type=cacheonly"]
}

target "validate-vendor" {
  dockerfile = "./dockerfiles/vendor.Dockerfile"
  target = "validate"
  output = ["type=cacheonly"]
}

target "update-vendor" {
  dockerfile = "./dockerfiles/vendor.Dockerfile"
  target = "update"
  output = ["."]
}

target "mod-outdated" {
  dockerfile = "./dockerfiles/vendor.Dockerfile"
  target = "outdated"
  no-cache-filter = ["outdated"]
  output = ["type=cacheonly"]
}

target "binary" {
  target = "binary"
  output = ["./bin"]
}

target "artifact" {
  target = "artifact"
  output = ["./bin"]
}

target "artifact-all" {
  inherits = ["artifact"]
  platforms = [
    "linux/amd64",
    "linux/arm/v6",
    "linux/arm/v7",
    "linux/arm64",
    "linux/ppc64le",
    "linux/s390x"
  ]
}

// Special target: https://github.com/docker/metadata-action#bake-definition
target "docker-metadata-action" {
  tags = ["registry:local"]
}

target "image" {
  inherits = ["docker-metadata-action"]
}

target "image-local" {
  inherits = ["image"]
  output = ["type=docker"]
}

target "image-all" {
  inherits = ["image"]
  platforms = [
    "linux/amd64",
    "linux/arm/v6",
    "linux/arm/v7",
    "linux/arm64",
    "linux/ppc64le",
    "linux/s390x"
  ]
}

target "_common_docs" {
  dockerfile = "./dockerfiles/docs.Dockerfile"
}

target "docs-export" {
  inherits = ["_common_docs"]
  target = "out"
  output = ["type=local,dest=build/docs"]
}

target "docs-image" {
  inherits = ["_common_docs"]
  target = "server"
  output = ["type=docker"]
  tags = ["registry-docs:local"]
}

target "docs-test" {
  inherits = ["_common_docs"]
  target = "test"
  output = ["type=cacheonly"]
}

target "authors" {
  dockerfile = "./dockerfiles/authors.Dockerfile"
  target = "update"
  output = ["."]
}

target "validate-authors" {
  dockerfile = "./dockerfiles/authors.Dockerfile"
  target = "validate"
  output = ["type=cacheonly"]
}
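These targets are driven with `docker buildx bake`; a few representative invocations (the commit range is illustrative, and bake reads HCL `variable` blocks from the environment):

```sh
# Build the local image (the "default" group):
docker buildx bake

# Lint, commit, and vendor checks:
COMMIT_RANGE=origin/main..HEAD docker buildx bake validate

# Compile binaries into ./bin:
docker buildx bake binary
```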
34
dockerfiles/authors.Dockerfile
Normal file
@ -0,0 +1,34 @@
# syntax=docker/dockerfile:1

ARG ALPINE_VERSION=3.20

FROM alpine:${ALPINE_VERSION} AS gen
RUN apk add --no-cache git
WORKDIR /src
RUN --mount=type=bind,target=. <<EOT
set -e
mkdir /out
# see also ".mailmap" for how email addresses and names are deduplicated
{
  echo "# This file lists all individuals having contributed content to the repository."
  echo "# For how it is generated, see dockerfiles/authors.Dockerfile."
  echo
  git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
} > /out/AUTHORS
cat /out/AUTHORS
EOT

FROM scratch AS update
COPY --from=gen /out /

FROM gen AS validate
RUN --mount=type=bind,target=.,rw <<EOT
set -e
git add -A
cp -rf /out/* .
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
  echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
  git status --porcelain -- AUTHORS
  exit 1
fi
EOT
39
dockerfiles/docs.Dockerfile
Normal file
@ -0,0 +1,39 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.22.4
ARG ALPINE_VERSION=3.20

FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
RUN apk add --no-cache git

FROM base AS hugo
ARG HUGO_VERSION=0.119.0
RUN --mount=type=cache,target=/go/mod/pkg \
    go install github.com/gohugoio/hugo@v${HUGO_VERSION}

FROM base AS build-base
COPY --from=hugo $GOPATH/bin/hugo /bin/hugo
WORKDIR /src

FROM build-base AS build
RUN --mount=type=bind,rw,source=docs,target=. \
    hugo --gc --minify --destination /out

FROM build-base AS server
COPY docs .
ENTRYPOINT [ "hugo", "server", "--bind", "0.0.0.0" ]
EXPOSE 1313

FROM scratch AS out
COPY --from=build /out /

FROM wjdp/htmltest:v0.17.0 AS test
# Copy the site to a public/distribution subdirectory
# This is a workaround for a limitation in htmltest, see:
# https://github.com/wjdp/htmltest/issues/45
WORKDIR /test/public/distribution
COPY --from=build /out .
WORKDIR /test
ADD docs/.htmltest.yml .htmltest.yml
RUN --mount=type=cache,target=tmp/.htmltest \
    htmltest
23
dockerfiles/git.Dockerfile
Normal file
@ -0,0 +1,23 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.22.4
ARG ALPINE_VERSION=3.20

FROM alpine:${ALPINE_VERSION} AS base
RUN apk add --no-cache git gpg

FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS gitvalidation
ARG GIT_VALIDATION_VERSION=v1.1.0
RUN --mount=type=cache,target=/root/.cache \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/out go install "github.com/vbatts/git-validation@${GIT_VALIDATION_VERSION}"

FROM base AS validate
ARG COMMIT_RANGE
RUN if [ -z "$COMMIT_RANGE" ]; then echo "COMMIT_RANGE required" && exit 1; fi
ENV GIT_CHECK_EXCLUDE="./vendor"
WORKDIR /src
RUN --mount=type=bind,target=. \
    --mount=type=cache,target=/root/.cache \
    --mount=from=gitvalidation,source=/out/git-validation,target=/usr/bin/git-validation \
    git-validation -q -range "${COMMIT_RANGE}" -run short-subject,dangling-whitespace
19
dockerfiles/lint.Dockerfile
Normal file
@ -0,0 +1,19 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.22.4
ARG ALPINE_VERSION=3.20
ARG GOLANGCI_LINT_VERSION=v1.59.1
ARG BUILDTAGS=""

FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint

FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
RUN apk add --no-cache gcc musl-dev
WORKDIR /src

FROM base
ENV GOFLAGS="-buildvcs=false"
RUN --mount=type=bind,target=. \
    --mount=type=cache,target=/root/.cache \
    --mount=from=golangci-lint,source=/usr/bin/golangci-lint,target=/usr/bin/golangci-lint \
    golangci-lint --build-tags "${BUILDTAGS}" run
46
dockerfiles/vendor.Dockerfile
Normal file
@ -0,0 +1,46 @@
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.22.4
ARG ALPINE_VERSION=3.20
ARG MODOUTDATED_VERSION=v0.8.0

FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
RUN apk add --no-cache git rsync
WORKDIR /src

FROM base AS vendored
RUN --mount=target=/context \
    --mount=target=.,type=tmpfs \
    --mount=target=/go/pkg/mod,type=cache <<EOT
set -e
rsync -a /context/. .
go mod tidy
go mod vendor
mkdir /out
cp -r go.mod go.sum vendor /out
EOT

FROM scratch AS update
COPY --from=vendored /out /out

FROM vendored AS validate
RUN --mount=target=/context \
    --mount=target=.,type=tmpfs <<EOT
set -e
rsync -a /context/. .
git add -A
rm -rf vendor
cp -rf /out/* .
if [ -n "$(git status --porcelain -- go.mod go.sum vendor)" ]; then
  echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make vendor"'
  git status --porcelain -- go.mod go.sum vendor
  exit 1
fi
EOT

FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
FROM base AS outdated
RUN --mount=target=.,ro \
    --mount=target=/go/pkg/mod,type=cache \
    --mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
    go list -mod=readonly -u -m -json all | go-mod-outdated -update -direct
9
docs/.htmltest.yml
Normal file
@ -0,0 +1,9 @@
DirectoryPath: "public"
EnforceHTTPS: true
CheckDoctype: true
CheckExternal: true
IgnoreAltMissing: true
IgnoreAltEmpty: true
IgnoreEmptyHref: true
IgnoreInternalEmptyHash: true
IgnoreDirectoryMissingTrailingSlash: true
77
docs/content/_index.md
Normal file
@ -0,0 +1,77 @@
---
description: High-level overview of the Registry
keywords: registry, on-prem, images, tags, repository, distribution
title: Distribution Registry
---

## What it is

The Registry is a stateless, highly scalable server-side application that stores
and lets you distribute container images and other content. The Registry is open-source, under the
permissive [Apache license](https://en.wikipedia.org/wiki/Apache_License).

## Why use it

You should use the Registry if you want to:

* tightly control where your images are being stored
* fully own your image distribution pipeline
* integrate image storage and distribution tightly into your in-house development workflow

## Alternatives

Users looking for a zero-maintenance, ready-to-go solution are encouraged to
use one of the existing registry services. Many of these provide support and security
scanning, and are free for public repositories. For example:

- [Docker Hub](https://hub.docker.com)
- [Quay.io](https://quay.io/)
- [GitHub Packages](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry)

Cloud infrastructure providers such as [AWS](https://aws.amazon.com/ecr/), [Azure](https://azure.microsoft.com/products/container-registry/), [Google Cloud](https://cloud.google.com/artifact-registry) and [IBM Cloud](https://www.ibm.com/products/container-registry) also have container registry services available at a cost.

## Compatibility

The distribution registry implements the [OCI Distribution Spec](https://github.com/opencontainers/distribution-spec) version 1.0.1.

## Basic commands

Start your registry

```sh
docker run -d -p 5000:5000 --name registry registry:2
```

Pull (or build) some image from the hub

```sh
docker pull ubuntu
```

Tag the image so that it points to your registry

```sh
docker image tag ubuntu localhost:5000/myfirstimage
```

Push it

```sh
docker push localhost:5000/myfirstimage
```

Pull it back

```sh
docker pull localhost:5000/myfirstimage
```

Now stop your registry and remove all data

```sh
docker container stop registry && docker container rm -v registry
```

## Next

You should now read the [detailed introduction about the registry](about),
or jump directly to [deployment instructions](about/deploying).
73
docs/content/about/_index.md
Normal file
@ -0,0 +1,73 @@
---
description: Explains what the Registry is, basic use cases and requirements
keywords: registry, on-prem, images, tags, repository, distribution, use cases, requirements
title: About Registry
---

A registry is a storage and content delivery system, holding named container
images and other content, available in different tagged versions.

> Example: the image `distribution/registry`, with tags `2.0` and `2.1`.

Users interact with a registry by pushing and pulling images.

> Example: `docker pull registry-1.docker.io/distribution/registry:2.1`.

Storage itself is delegated to drivers. The default storage driver is the local
POSIX filesystem, which is suitable for development or small deployments.
Additional cloud-based storage drivers like S3, Microsoft Azure and Google Cloud Storage
are supported. Because adding support for new storage drivers is currently on
hold, people considering other backends should check whether their storage
system is S3-compatible, as many cloud storage systems are.

Since securing access to your hosted images is paramount, the Registry natively
supports TLS and basic authentication.

The Registry GitHub repository includes additional information about advanced
authentication and authorization methods. Only very large or public deployments
are expected to extend the Registry in this way.

Finally, the Registry ships with a robust [notification system](notifications),
calling webhooks in response to activity, and both extensive logging and reporting,
mostly useful for large installations that want to collect metrics.

## Understanding image naming

Image names as used in typical docker commands reflect their origin:

* `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command
* `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar`

You can find out more about the various Docker commands dealing with images in
the [Docker engine documentation](https://docs.docker.com/engine/reference/commandline/cli/).

## Use cases

Running your own Registry is a great solution to integrate with and complement
your CI/CD system. In a typical workflow, a commit to your source revision
control system would trigger a build on your CI system, which would then push a
new image to your Registry if the build is successful. A notification from the
Registry would then trigger a deployment on a staging environment, or notify
other systems that a new image is available.

It's also an essential component if you want to quickly deploy a new image over
a large cluster of machines.

Finally, it's the best way to distribute images inside an isolated network.

## Requirements

You absolutely need to be familiar with Docker, specifically with regard to
pushing and pulling images. You must understand the difference between the
daemon and the CLI, and at least grasp basic concepts about networking.

Also, while just starting a registry is fairly easy, operating it in a
production environment requires operational skills, just like any other service.
You are expected to be familiar with systems availability and scalability,
logging and log processing, systems monitoring, and security 101. A strong
understanding of HTTP and overall network communications, plus familiarity with
Go, are certainly useful as well for advanced operations or hacking.

## Next

Dive into [deploying your registry](deploying)
52
docs/content/about/architecture.md
Normal file
@ -0,0 +1,52 @@
---
draft: true
---

# Architecture

## Design

**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios.

### Eventual Consistency

> **NOTE:** This section belongs somewhere, perhaps in a design document. We
> are leaving this here so the information is not lost.

Running the registry on eventually consistent backends has been part of the
design from the beginning. This section covers some of the approaches to
dealing with this reality.

There are a few classes of issues that we need to worry about when
implementing something on top of the storage drivers:

1. Read-after-write consistency (see this [article on
   S3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)).
2. [Write-write conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict).

In reality, the registry must worry about these kinds of errors when doing the
following:

1. Accepting data into a temporary upload file that may not yet contain the
   latest data block (read-after-write).
2. Moving uploaded data into its blob location (write-write race).
3. Modifying the "current" manifest for a given tag (write-write race).
4. A whole slew of operations around deletes (read-after-write, delete-write
   races, garbage collection, etc.).

The backend path layout employs a few techniques to avoid these problems:

1. Large writes are done to private upload directories. This alleviates most
   of the corruption potential under multiple writers by avoiding multiple
   writers.
2. Constraints in storage driver implementations, such as support for writing
   after the end of a file to extend it.
3. Digest verification to avoid data corruption.
4. Manifest files are stored by digest and cannot change.
5. All other non-content files (links, hashes, etc.) are written as an atomic
   unit. Anything that requires additions and deletions is broken out into
   separate "files". Last writer still wins.

Unfortunately, one must play this game when trying to build something like
this on top of eventually consistent storage systems. If we run into serious
problems, we can wrap the storage drivers in a shared consistency layer, but
that would increase complexity and hinder registry cluster performance.
76
docs/content/about/compatibility.md
Normal file
@@ -0,0 +1,76 @@
---
description: describes get by digest pitfall
keywords: registry, manifest, images, tags, repository, distribution, digest
title: Registry compatibility
---

## Synopsis

If a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9
or older, and the manifest was pushed with Docker Engine 1.10, a security check
causes the Engine to receive a manifest it cannot use and the pull fails.

## Registry manifest support

Historically, the registry has supported a single manifest type
known as _Schema 1_.

With the move toward multiple-architecture images, the distribution project
introduced two new manifest types: Schema 2 manifests and manifest lists. Registry
2.3 supports all three manifest types and sometimes performs an on-the-fly
transformation of a manifest before serving the JSON in the response, to
preserve compatibility with older versions of Docker Engine.

This conversion has some implications for pulling manifests by digest, and this
document enumerates them.

## Content Addressable Storage (CAS)

Manifests are stored and retrieved in the registry by keying off a digest
representing a hash of the contents. One of the advantages provided by CAS is
security: if the contents are changed, the digest no longer matches.
This prevents any modification of the manifest by a MITM attack or an untrusted
third party.

When a manifest is stored by the registry, this digest is returned in the HTTP
response headers and, if events are configured, delivered within the event. The
manifest can be retrieved either by tag or by this digest.
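For example, you can read this digest back yourself. A quick sketch (the
registry host and repository name are placeholders from the deployment
examples) that asks the registry for a manifest and prints the
`Docker-Content-Digest` header it returns:

```console
$ curl -sI \
    -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \
    https://myregistry.domain.com/v2/my-ubuntu/manifests/latest \
    | grep -i docker-content-digest
Docker-Content-Digest: sha256:<digest of the served manifest>
```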

For registry versions 2.2.1 and below, the registry always stores and
serves _Schema 1_ manifests. Docker Engine 1.10 first
attempts to send a _Schema 2_ manifest, falling back to sending a
Schema 1 manifest when it detects that the registry does not
support the new version.

## Registry v2.3

### Manifest push with Docker 1.10

The Engine constructs a _Schema 2_ manifest, which the
registry persists to disk.

When the manifest is pulled by digest or tag with Docker Engine 1.10, a
_Schema 2_ manifest is returned. Docker Engine 1.10
understands the new manifest format.

When the manifest is pulled by *tag* with Docker Engine 1.9 or older, the
manifest is converted on the fly to _Schema 1_ and sent in the
response. Docker Engine 1.9 is compatible with this older format.

When the manifest is pulled by _digest_ with Docker Engine 1.9 or older, the
same rewriting process does not happen in the registry. If it did,
the digest would no longer match the hash of the manifest, violating the
constraints of CAS.

For this reason, if a manifest is pulled by _digest_ from a registry 2.3 with Docker
Engine 1.9 or older, and the manifest was pushed with Docker Engine 1.10, a
security check causes the Engine to receive a manifest it cannot use and the
pull fails.

### Manifest push with Docker 1.9 and older

The Docker Engine constructs a _Schema 1_ manifest, which the
registry persists to disk.

When the manifest is pulled by digest or tag with any Docker version, a
_Schema 1_ manifest is returned.
1335
docs/content/about/configuration.md
Normal file
File diff suppressed because it is too large
580
docs/content/about/deploying.md
Normal file
@@ -0,0 +1,580 @@
---
description: Explains how to deploy a registry
keywords: registry, on-prem, images, tags, repository, distribution, deployment
title: Deploy a registry server
---

Before you can deploy a registry, you need to install Docker on the host.
A registry is an instance of the `registry` image, and runs within Docker.

This topic provides basic information about deploying and configuring a
registry. For an exhaustive list of configuration options, see the
[configuration reference](configuration.md).

If you have an air-gapped datacenter, see
[Considerations for air-gapped registries](#considerations-for-air-gapped-registries).

## Run a local registry

Use a command like the following to start the registry container:

```console
$ docker run -d -p 5000:5000 --restart=always --name registry registry:2
```

The registry is now ready to use.

> **Warning**: These first few examples show registry configurations that are
> only appropriate for testing. A production-ready registry must be protected by
> TLS and should ideally use an access-control mechanism. Keep reading and then
> continue to the [configuration guide](configuration.md) to deploy a
> production-ready registry.

## Copy an image from Docker Hub to your registry

You can pull an image from Docker Hub and push it to your registry. The
following example pulls the `ubuntu:16.04` image from Docker Hub and re-tags it
as `my-ubuntu`, then pushes it to the local registry. Finally, the
`ubuntu:16.04` and `my-ubuntu` images are deleted locally and the
`my-ubuntu` image is pulled from the local registry.

1. Pull the `ubuntu:16.04` image from Docker Hub.

   ```console
   $ docker pull ubuntu:16.04
   ```

2. Tag the image as `localhost:5000/my-ubuntu`. This creates an additional tag
   for the existing image. When the first part of the tag is a hostname and
   port, Docker interprets this as the location of a registry when pushing.

   ```console
   $ docker tag ubuntu:16.04 localhost:5000/my-ubuntu
   ```

3. Push the image to the local registry running at `localhost:5000`:

   ```console
   $ docker push localhost:5000/my-ubuntu
   ```

4. Remove the locally-cached `ubuntu:16.04` and `localhost:5000/my-ubuntu`
   images, so that you can test pulling the image from your registry. This
   does not remove the `localhost:5000/my-ubuntu` image from your registry.

   ```console
   $ docker image remove ubuntu:16.04
   $ docker image remove localhost:5000/my-ubuntu
   ```

5. Pull the `localhost:5000/my-ubuntu` image from your local registry.

   ```console
   $ docker pull localhost:5000/my-ubuntu
   ```

## Stop a local registry

To stop the registry, use the same `docker container stop` command as with any other
container.

```console
$ docker container stop registry
```

To remove the container, use `docker container rm`.

```console
$ docker container stop registry && docker container rm -v registry
```

## Basic configuration

To configure the container, you can pass additional or modified options to the
`docker run` command.

The following sections provide basic guidelines for configuring your registry.
For more details, see the [registry configuration reference](configuration.md).

### Start the registry automatically

If you want to use the registry as part of your permanent infrastructure, you
should set it to restart automatically when Docker restarts or if it exits.
This example uses the `--restart always` flag to set a restart policy for the
registry.

```console
$ docker run -d \
  -p 5000:5000 \
  --restart=always \
  --name registry \
  registry:2
```

### Customize the published port

If you are already using port 5000, or you want to run multiple local
registries to separate areas of concern, you can customize the registry's
port settings. This example runs the registry on port 5001 and also names it
`registry-test`. Remember, the first part of the `-p` value is the host port
and the second part is the port within the container. Within the container, the
registry listens on port `5000` by default.

```console
$ docker run -d \
  -p 5001:5000 \
  --name registry-test \
  registry:2
```

If you want to change the port the registry listens on within the container, you
can use the environment variable `REGISTRY_HTTP_ADDR` to change it. This command
causes the registry to listen on port 5001 within the container:

```console
$ docker run -d \
  -e REGISTRY_HTTP_ADDR=0.0.0.0:5001 \
  -p 5001:5001 \
  --name registry-test \
  registry:2
```

## Storage customization

### Customize the storage location

By default, your registry data is persisted as a [docker volume](https://docs.docker.com/storage/volumes)
on the host filesystem. If you want to store your registry contents at a specific
location on your host filesystem, such as if you have an SSD or SAN mounted into
a particular directory, you might decide to use a bind mount instead. A bind mount
is more dependent on the filesystem layout of the Docker host, but more performant
in many situations. The following example bind-mounts the host directory
`/mnt/registry` into the registry container at `/var/lib/registry/`.

```console
$ docker run -d \
  -p 5000:5000 \
  --restart=always \
  --name registry \
  -v /mnt/registry:/var/lib/registry \
  registry:2
```

### Customize the storage back-end

By default, the registry stores its data on the local filesystem, whether you
use a bind mount or a volume. You can store the registry data in an Amazon S3
bucket, Google Cloud Storage, or on another storage back-end by using
[storage drivers](../storage-drivers/_index.md). For more information, see
[storage configuration options](configuration.md#storage).
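For instance, a minimal sketch of an S3-backed configuration might look like
the following (the bucket name and region are placeholders; credentials are
typically supplied via IAM roles or environment variables):

```yaml
storage:
  s3:
    region: us-east-1
    bucket: my-registry-bucket
```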

## Run an externally-accessible registry

Running a registry only accessible on `localhost` has limited usefulness. In
order to make your registry accessible to external hosts, you must first secure
it using TLS.

This example is extended in [Run the registry as a
service](#run-the-registry-as-a-service) below.

### Get a certificate

These examples assume the following:

- Your registry URL is `https://myregistry.domain.com/`.
- Your DNS, routing, and firewall settings allow access to the registry's host
  on port 443.
- You have already obtained a certificate from a certificate authority (CA).

If you have been issued an _intermediate_ certificate instead, see
[use an intermediate certificate](#use-an-intermediate-certificate).

1. Create a `certs` directory.

   ```console
   $ mkdir -p certs
   ```

   Copy the `.crt` and `.key` files from the CA into the `certs` directory.
   The following steps assume that the files are named `domain.crt` and
   `domain.key`.

2. Stop the registry if it is currently running.

   ```console
   $ docker container stop registry
   ```

3. Restart the registry, directing it to use the TLS certificate. This command
   bind-mounts the `certs/` directory into the container at `/certs/`, and sets
   environment variables that tell the container where to find the `domain.crt`
   and `domain.key` files. The registry runs on port 443, the default HTTPS port.

   ```console
   $ docker run -d \
     --restart=always \
     --name registry \
     -v "$(pwd)"/certs:/certs \
     -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
     -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
     -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
     -p 443:443 \
     registry:2
   ```

4. Docker clients can now pull from and push to your registry using its
   external address. The following commands demonstrate this:

   ```console
   $ docker pull ubuntu:16.04
   $ docker tag ubuntu:16.04 myregistry.domain.com/my-ubuntu
   $ docker push myregistry.domain.com/my-ubuntu
   $ docker pull myregistry.domain.com/my-ubuntu
   ```

#### Use an intermediate certificate

A certificate issuer may supply you with an *intermediate* certificate. In this
case, you must concatenate your certificate with the intermediate certificate to
form a *certificate bundle*. You can do this using the `cat` command:

```console
$ cat domain.crt intermediate-certificates.pem > certs/domain.crt
```

You can use the certificate bundle just as you use the `domain.crt` file in
the previous example.

### Support for Let's Encrypt

The registry supports using Let's Encrypt to automatically obtain a
browser-trusted certificate. For more information on Let's Encrypt, see
[https://letsencrypt.org/how-it-works/](https://letsencrypt.org/how-it-works/)
and the relevant section of the
[registry configuration](configuration.md#letsencrypt).

### Use an insecure registry (testing only)

It is possible to use a self-signed certificate, or to use your registry
insecurely. Unless you have set up verification for your self-signed
certificate, this is for testing only. See [run an insecure registry](insecure.md).

## Run the registry as a service

[Swarm services](https://docs.docker.com/engine/swarm/services) provide several advantages over
standalone containers. They use a declarative model, which means that you define
the desired state and Docker works to keep your service in that state. Services
provide automatic load balancing, scaling, and the ability to control the
distribution of your service, among other advantages. Services also allow you to
store sensitive data such as TLS certificates in
[secrets](https://docs.docker.com/engine/swarm/secrets).

The storage back-end you use determines whether you use a fully scaled service
or a service with either only a single node or a node constraint.

- If you use a distributed storage driver, such as Amazon S3, you can use a
  fully replicated service. Each worker can write to the storage back-end
  without causing write conflicts.

- If you use a local bind mount or volume, each worker node writes to its
  own storage location, which means that each registry contains a different
  data set. You can solve this problem by using a single-replica service and a
  node constraint to ensure that only a single worker is writing to the bind
  mount.

The following example starts a registry as a single-replica service, which is
accessible on any swarm node on port 443. It assumes you are using the same
TLS certificates as in the previous examples.

First, save the TLS certificate and key as secrets:

```console
$ docker secret create domain.crt certs/domain.crt

$ docker secret create domain.key certs/domain.key
```

Next, add a label to the node where you want to run the registry.
To get the node's name, use `docker node ls`. Substitute your node's name for
`node1` below.

```console
$ docker node update --label-add registry=true node1
```

Next, create the service, granting it access to the two secrets and constraining
it to only run on nodes with the label `registry=true`. Besides the constraint,
you are also specifying that only a single replica should run at a time. The
example bind-mounts `/mnt/registry` on the swarm node to `/var/lib/registry/`
within the container. Bind mounts rely on the pre-existing source directory,
so be sure `/mnt/registry` exists on `node1`. You might need to create it before
running the following `docker service create` command.

By default, secrets are mounted into a service at `/run/secrets/<secret-name>`.

```console
$ docker service create \
  --name registry \
  --secret domain.crt \
  --secret domain.key \
  --constraint 'node.labels.registry==true' \
  --mount type=bind,src=/mnt/registry,dst=/var/lib/registry \
  -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/run/secrets/domain.crt \
  -e REGISTRY_HTTP_TLS_KEY=/run/secrets/domain.key \
  --publish published=443,target=443 \
  --replicas 1 \
  registry:2
```

You can access the service on port 443 of any swarm node. Docker sends the
requests to the node which is running the service.

## Load balancing considerations

You may want to use a load balancer to distribute load, terminate TLS, or
provide high availability. While a full load balancing setup is outside the
scope of this document, there are a few considerations that can make the process
smoother.

The most important aspect is that a load-balanced cluster of registries must
share the same resources. For the current version of the registry, this means
the following must be the same:

- Storage Driver
- HTTP Secret
- Redis Cache (if configured)

Differences in any of the above cause problems serving requests.
As an example, if you're using the filesystem driver, all registry instances
must have access to the same filesystem root, on
the same machine. For other drivers, such as S3 or Azure, they should be
accessing the same resource and share an identical configuration.
The _HTTP Secret_ coordinates uploads, so it also must be the same across
instances. Configuring different Redis instances works (at the time
of writing), but is not optimal if the instances are not shared, because
more requests are directed to the backend.

### Important/Required HTTP-Headers

Getting the headers correct is very important. For all responses to any
request under the "/v2/" url space, the `Docker-Distribution-API-Version`
header should be set to the value "registry/2.0", even for a 4xx response.
This header allows the docker engine to quickly resolve authentication realms
and fall back to version 1 registries, if necessary. Confirming that this is
set up correctly can help avoid problems with fallback.

In the same train of thought, you must make sure you are properly sending the
`X-Forwarded-Proto`, `X-Forwarded-For`, and `Host` headers to their "client-side"
values. Failure to do so usually makes the registry issue redirects to internal
hostnames or downgrade from HTTPS to HTTP.
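As an illustration, if nginx sits in front of the registry, the relevant part
of a proxy configuration might be sketched like this (the upstream address is
a placeholder; see the [recipes](../recipes/_index.md) for complete, tested
proxy configurations):

```nginx
location /v2/ {
    proxy_pass          https://registry-backend:5000;
    proxy_set_header    Host               $http_host;
    proxy_set_header    X-Real-IP          $remote_addr;
    proxy_set_header    X-Forwarded-For    $proxy_add_x_forwarded_for;
    proxy_set_header    X-Forwarded-Proto  $scheme;
}
```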

A properly secured registry should return 401 when the "/v2/" endpoint is hit
without credentials. The response should include a `WWW-Authenticate`
challenge, providing guidance on how to authenticate, such as with basic auth
or a token service. If the load balancer has health checks, it is recommended
to configure it to consider a 401 response as healthy and any other as down.
This secures your registry by ensuring that configuration problems with
authentication don't accidentally expose an unprotected registry. If you're
using a less sophisticated load balancer, such as Amazon's Elastic Load
Balancer, that doesn't allow you to change the healthy response code, health
checks can be directed at "/", which always returns a `200 OK` response.
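You can check this from the command line. Against a registry configured like
the htpasswd example below, the exchange should look roughly like this (the
hostname is a placeholder, and the exact headers depend on your configuration):

```console
$ curl -i https://myregistry.domain.com/v2/
HTTP/1.1 401 Unauthorized
Content-Type: application/json; charset=utf-8
Docker-Distribution-Api-Version: registry/2.0
Www-Authenticate: Basic realm="Registry Realm"
...
```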

## Restricting access

Except for registries running on secure local networks, registries should always
implement access restrictions.

### Native basic auth

The simplest way to achieve access restriction is through basic authentication
(this is very similar to other web servers' basic authentication mechanism).
This example uses native basic authentication using `htpasswd` to store the
secrets.

{{< hint type=warning >}}
You **cannot** use authentication schemes that send credentials as clear
text over an unencrypted connection. You must
[configure TLS first](#run-an-externally-accessible-registry) for
authentication to work.
{{< /hint >}}

{{< hint type=warning >}}
The distribution registry **only** supports htpasswd credentials in
bcrypt format, so if you omit the `-B` option when generating the credential
using htpasswd, all authentication attempts will fail.
{{< /hint >}}

1. Create a password file with one entry for the user `testuser`, with password
   `testpassword`:

   ```console
   $ mkdir auth
   $ docker run \
     --entrypoint htpasswd \
     httpd:2 -Bbn testuser testpassword > auth/htpasswd
   ```

   On Windows, make sure the output file is correctly encoded:

   ```powershell
   docker run --rm --entrypoint htpasswd httpd:2 -Bbn testuser testpassword | Set-Content -Encoding ASCII auth/htpasswd
   ```

2. Stop the registry.

   ```console
   $ docker container stop registry
   ```

3. Start the registry with basic authentication.

   ```console
   $ docker run -d \
     -p 5000:5000 \
     --restart=always \
     --name registry \
     -v "$(pwd)"/auth:/auth \
     -e "REGISTRY_AUTH=htpasswd" \
     -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
     -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
     -v "$(pwd)"/certs:/certs \
     -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
     -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
     registry:2
   ```

4. Try to pull an image from the registry, or push an image to the registry.
   These commands fail.

5. Log in to the registry.

   ```console
   $ docker login myregistrydomain.com:5000
   ```

   Provide the username and password from the first step.

   Test that you can now pull an image from the registry or push an image to
   the registry.

{{< hint type=note title="X509 errors" >}}
X509 errors usually indicate that you are attempting to use
a self-signed certificate without configuring the Docker daemon correctly.
See [run an insecure registry](insecure.md).
{{< /hint >}}

### More advanced authentication

You may want to leverage more advanced basic auth implementations by using a
proxy in front of the registry. See the [recipes list](../recipes/_index.md).

The registry also supports delegated authentication, which redirects users to a
specific trusted token server. This approach is more complicated to set up, and
only makes sense if you need to fully configure ACLs and need more control over
the registry's integration into your global authorization and authentication
systems. Refer to the [background information](../spec/auth/token.md) and
the [configuration information](configuration.md#auth).

This approach requires you to implement your own authentication system or
leverage a third-party implementation.
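As a rough sketch of the shape of such a configuration (the realm, service,
issuer, and certificate bundle path are all placeholders for your own token
server's values):

```yaml
auth:
  token:
    realm: https://auth.example.com/token
    service: my-registry
    issuer: my-token-issuer
    rootcertbundle: /certs/auth-ca.crt
```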

## Deploy your registry using a Compose file

If your registry invocation is advanced, it may be easier to use a Docker
compose file to deploy it, rather than relying on a specific `docker run`
invocation. Use the following example `docker-compose.yml` as a template.

```yaml
registry:
  restart: always
  image: registry:2
  ports:
    - 5000:5000
  environment:
    REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
    REGISTRY_HTTP_TLS_KEY: /certs/domain.key
    REGISTRY_AUTH: htpasswd
    REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
  volumes:
    - /path/data:/var/lib/registry
    - /path/certs:/certs
    - /path/auth:/auth
```

Replace `/path` with the directory which contains the `certs/` and `auth/`
directories.

Start your registry by issuing the following command in the directory containing
the `docker-compose.yml` file:

```console
$ docker-compose up -d
```

## Considerations for air-gapped registries

You can run a registry in an environment with no internet connectivity.
However, if you rely on any images which are not local, you need to consider the
following:

- You may need to build your local registry's data volume on a connected
  host where you can run `docker pull` to get any images which are available
  remotely, and then migrate the registry's data volume to the air-gapped
  network.

- Certain images, such as the official Microsoft Windows base images, are not
  distributable. This means that when you push an image based on one of these
  images to your private registry, the non-distributable layers are **not**
  pushed, but are always fetched from their authorized location. This is fine
  for internet-connected hosts, but not in an air-gapped set-up.

  You can configure the Docker daemon to allow pushing non-distributable layers
  to private registries.
  **This is only useful in air-gapped set-ups in the presence of
  non-distributable images, or in extremely bandwidth-limited situations.**
  You are responsible for ensuring that you are in compliance with the terms of
  use for non-distributable layers.

1. Edit the `daemon.json` file, which is located in `/etc/docker/` on Linux
   hosts and at `C:\ProgramData\docker\config\daemon.json` on Windows Server.
   Assuming the file was previously empty, add the following contents:

   ```json
   {
     "allow-nondistributable-artifacts": ["myregistrydomain.com:5000"]
   }
   ```

   The value is an array of registry addresses, separated by commas.

   Save and exit the file.

2. Restart Docker.

3. Restart the registry if it does not start automatically.

4. When you push images to the registries in the list, their
   non-distributable layers are pushed to the registry.

{{< hint type=warning >}}
Non-distributable artifacts typically have restrictions on
how and where they can be distributed and shared. Only use this feature
to push artifacts to private registries and ensure that you are in
compliance with any terms that cover redistributing non-distributable
artifacts.
{{< /hint >}}

## Next steps

More specific and advanced information is available in the following sections:

- [Configuration reference](configuration.md)
- [Working with notifications](notifications.md)
- [Advanced "recipes"](../recipes/_index.md)
- [Registry API](../spec/api.md)
- [Storage driver model](../storage-drivers/_index.md)
- [Token authentication](../spec/auth/token.md)
124
docs/content/about/garbage-collection.md
Normal file
@@ -0,0 +1,124 @@
---
description: High level discussion of garbage collection
keywords: registry, garbage, images, tags, repository, distribution
title: Garbage collection
---

As of v2.4.0, a garbage collector command is included within the registry binary.
This document describes what this command does and how and why it should be used.

## About garbage collection

In the context of the registry, garbage collection is the process of
removing blobs from the filesystem when they are no longer referenced by a
manifest. Blobs can include both layers and manifests.

Registry data can occupy considerable amounts of disk space. In addition,
garbage collection can be a security consideration, when it is desirable to ensure
that certain layers no longer exist on the filesystem.

## Garbage collection in practice

Filesystem layers are stored by their content address in the Registry. This
has many advantages, one of which is that data is stored once and referred to by manifests.
See [here](compatibility.md#content-addressable-storage-cas) for more details.

Layers are therefore shared amongst manifests; each manifest maintains a reference
to the layer. As long as a layer is referenced by one manifest, it cannot be garbage
collected.

Manifests and layers can be deleted with the registry API (refer to the API
documentation [here](../spec/api.md#deleting-a-layer) and
[here](../spec/api.md#deleting-an-image) for details). This API removes references
to the target and makes them eligible for garbage collection. It also makes them
unable to be read via the API.

If a layer is deleted, it is removed from the filesystem when garbage collection
is run. If a manifest is deleted, the layers to which it refers are removed from
the filesystem if no other manifests refer to them.

### Example

In this example, manifest `A` references two layers: `a` and `b`. Manifest `B` references
layers `a` and `c`. In this state, nothing is eligible for garbage collection:

```
A -----> a <----- B
    \--> b        |
         c <------/
```

Manifest `B` is deleted via the API:

```
A -----> a        B
    \--> b
         c
```

In this state, layer `c` no longer has a reference and is eligible for garbage
collection. Layer `a` had one reference removed but is not garbage
collected, as it is still referenced by manifest `A`. The blob representing
manifest `B` is eligible for garbage collection.

After garbage collection has been run, manifest `A` and its blobs remain.

```
A -----> a
    \--> b
```

### More details about garbage collection

Garbage collection runs in two phases. First, in the 'mark' phase, the process
scans all the manifests in the registry. From these manifests, it constructs a
set of content address digests. This set is the 'mark set' and denotes the set
of blobs to *not* delete. Secondly, in the 'sweep' phase, the process scans all
the blobs and, if a blob's content address digest is not in the mark set, the
process deletes it.

> **Note**: You should ensure that the registry is in read-only mode or not running at
> all. If you were to upload an image while garbage collection is running, there is the
> risk that the image's layers are mistakenly deleted, leading to a corrupted image.

This type of garbage collection is known as stop-the-world garbage collection.
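If you drive the registry from a configuration file, one way to put it into
read-only mode for the duration of the run is the maintenance switch; a
minimal sketch:

```yaml
storage:
  maintenance:
    readonly:
      enabled: true
```

Restart the registry with this setting before collecting garbage, and disable
it again afterwards.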

## Run garbage collection

Garbage collection can be run as follows:

`bin/registry garbage-collect [--dry-run] /path/to/config.yml`

The garbage-collect command accepts a `--dry-run` parameter, which prints the progress
of the mark and sweep phases without removing any data. Running with a log level of `info`
gives a clear indication of items eligible for deletion.

The config.yml file should be in the following format:

```yaml
version: 0.1
storage:
  filesystem:
    rootdirectory: /registry/data
```

_Sample output from a dry run garbage collection with registry log level set to `info`_

```
hello-world
hello-world: marking manifest sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf
hello-world: marking blob sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb
hello-world: marking blob sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
hello-world: marking configuration sha256:690ed74de00f99a7d00a98a5ad855ac4febd66412be132438f9b8dbd300a937d
ubuntu

4 blobs marked, 5 blobs eligible for deletion
blob eligible for deletion: sha256:28e09fddaacbfc8a13f82871d9d66141a6ed9ca526cb9ed295ef545ab4559b81
blob eligible for deletion: sha256:7e15ce58ccb2181a8fced7709e9893206f0937cc9543bc0c8178ea1cf4d7e7b5
blob eligible for deletion: sha256:87192bdbe00f8f2a62527f36bb4c7c7f4eaf9307e4b87e8334fb6abec1765bcb
blob eligible for deletion: sha256:b549a9959a664038fc35c155a95742cf12297672ca0ae35735ec027d55bf4e97
blob eligible for deletion: sha256:f251d679a7c61455f06d793e43c06786d7766c88b8c24edf242b2c08e3c3f599
```
68
docs/content/about/glossary.md
Normal file
@@ -0,0 +1,68 @@
---
draft: true
---

# Glossary

This page contains definitions for distribution-related terms.

<dl>
  <dt id="blob"><h4>Blob</h4></dt>
  <dd>
    <blockquote>A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest").</blockquote>
    <p>
      <a href="#layer">Layers</a> are a good example of "blobs".
    </p>
  </dd>

  <dt id="image"><h4>Image</h4></dt>
  <dd>
    <blockquote>An image is a named set of immutable data from which a container can be created.</blockquote>
    <p>
      An image is represented by a JSON file called a <a href="#manifest">manifest</a>, and is conceptually a set of <a href="#layer">layers</a>.
      Image names indicate the location where they can be pulled from and pushed to, as they usually start with a <a href="#registry">registry</a> domain name and port.
    </p>
  </dd>

  <dt id="layer"><h4>Layer</h4></dt>
  <dd>
    <blockquote>A layer is a tar archive bundling partial content from a filesystem.</blockquote>
    <p>
      Layers from an <a href="#image">image</a> are usually extracted in order on top of each other to make up a root filesystem from which containers run.
    </p>
  </dd>

  <dt id="manifest"><h4>Manifest</h4></dt>
  <dd><blockquote>A manifest is the JSON representation of an image.</blockquote></dd>

  <dt id="namespace"><h4>Namespace</h4></dt>
  <dd>
    <blockquote>A namespace is a collection of repositories with a common name prefix.</blockquote>
    <p>
      The namespace with an empty prefix is considered the Global Namespace.
    </p>
  </dd>

  <dt id="registry"><h4>Registry</h4></dt>
  <dd><blockquote>A registry is a service that lets you store and deliver <a href="#image">images</a> and other content.</blockquote></dd>

  <dt id="repository"><h4>Repository</h4></dt>
  <dd>
    <blockquote>A repository is a set of data containing all versions of a given image.</blockquote>
  </dd>

  <dt id="scope"><h4>Scope</h4></dt>
  <dd><blockquote>A scope is the portion of a namespace onto which a given authorization token is granted.</blockquote></dd>

  <dt id="tag"><h4>Tag</h4></dt>
  <dd>
    <blockquote>A tag is conceptually a "version" of a <a href="#image">named image</a>.</blockquote>
    <p>
      Example: <code>docker pull myimage:latest</code> instructs docker to pull the image "myimage" in version "latest".
    </p>
  </dd>
</dl>
12
docs/content/about/help.md
Normal file
@@ -0,0 +1,12 @@
---
description: Getting help with the Registry
keywords: registry, on-prem, images, tags, repository, distribution, help, 101, TL;DR
title: Get help
---

If you need help, or just want to chat about development, you can reach us on the #distribution channel in the CNCF Slack.

If you want to report a bug:

- Be sure to first read about [how to contribute](https://github.com/distribution/distribution/blob/master/CONTRIBUTING.md).
- You can then do so on the [GitHub project bugtracker](https://github.com/distribution/distribution/issues).
165
docs/content/about/insecure.md
Normal file
@@ -0,0 +1,165 @@
---
description: Deploying a Registry in an insecure fashion
keywords: registry, on-prem, images, tags, repository, distribution, insecure
title: Test an insecure registry
---

While it's highly recommended to secure your registry using a TLS certificate
issued by a known CA, you can choose to use self-signed certificates, or use
your registry over an unencrypted HTTP connection. Either of these choices
involves security trade-offs and additional configuration steps.

## Deploy a plain HTTP registry

{{< hint type=warning >}}
It's not possible to use an insecure registry with basic authentication.
{{< /hint >}}

This procedure configures Docker to entirely disregard security for your
registry. This is **very** insecure and is not recommended. It exposes your
registry to trivial man-in-the-middle (MITM) attacks. Only use this solution for
isolated testing or in a tightly controlled, air-gapped environment.

1. Edit the `daemon.json` file, whose default location is
   `/etc/docker/daemon.json` on Linux or
   `C:\ProgramData\docker\config\daemon.json` on Windows Server. If you use
   Docker Desktop for Mac or Docker Desktop for Windows, click the Docker icon, choose
   **Preferences** (Mac) or **Settings** (Windows), and choose **Docker Engine**.

   If the `daemon.json` file does not exist, create it. Assuming there are no
   other settings in the file, it should have the following contents:

   ```json
   {
     "insecure-registries" : ["myregistrydomain.com:5000"]
   }
   ```

   Substitute the address of your insecure registry for the one in the example.

   With insecure registries enabled, Docker goes through the following steps:

   - First, try using HTTPS.
   - If HTTPS is available but the certificate is invalid, ignore the error
     about the certificate.
   - If HTTPS is not available, fall back to HTTP.

2. Restart Docker for the changes to take effect.

Repeat these steps on every Engine host that wants to access your registry.
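As a quick smoke test (the hostname and port are placeholders for your
registry), the base API endpoint should now be reachable over plain HTTP; an
unauthenticated registry answers with `200 OK` and an empty JSON body:

```console
$ curl -i http://myregistrydomain.com:5000/v2/
HTTP/1.1 200 OK
...
{}
```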

## Use self-signed certificates

{{< hint type=warning >}}
Using this along with basic authentication requires you to **also** trust the
certificate in the OS certificate store for some versions of Docker (see below).
{{< /hint >}}

This is more secure than the insecure registry solution.

1. Generate your own certificate:

   ```console
   $ mkdir -p certs

   $ openssl req \
     -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \
     -addext "subjectAltName = DNS:myregistry.domain.com" \
     -x509 -days 365 -out certs/domain.crt
   ```

   Be sure to use the name `myregistry.domain.com` as the CN.

2. Use the result to [start your registry with TLS enabled](deploying.md#get-a-certificate).

3. Instruct every Docker daemon to trust that certificate. The way to do this
   depends on your OS.

   - **Linux**: Copy the `domain.crt` file to
     `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt` on every Docker
     host. You do not need to restart Docker.

   - **Windows Server**:

     1. Open Windows Explorer, right-click the `domain.crt`
        file, and choose Install certificate. When prompted, select the following
        options:

        | Store location | local machine |
        | Place all certificates in the following store | selected |

     2. Click **Browse** and select **Trusted Root Certificate Authorities**.

     3. Click **Finish**. Restart Docker.

   - **Docker Desktop for Mac**: Follow the instructions in
     [Adding custom CA certificates](https://docs.docker.com/desktop/mac/#add-tls-certificates).
     Restart Docker.

   - **Docker Desktop for Windows**: Follow the instructions in
     [Adding custom CA certificates](https://docs.docker.com/desktop/windows/#adding-tls-certificates).
     Restart Docker.
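To sanity-check that the certificate being served is the one you generated,
you can inspect the TLS handshake directly (a quick sketch; the host matches
the CN from step 1, and the port is whatever you published):

```console
$ openssl s_client -connect myregistry.domain.com:443 \
    -servername myregistry.domain.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -dates
```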

## Troubleshoot insecure registry

This section lists some common failures and how to recover from them.

### Failing...

Failing to configure the Engine daemon and trying to pull from a registry that is not using
TLS results in the following message:

```none
FATA[0000] Error response from daemon: v1 ping attempt failed with error:
Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527.
If this private registry supports only HTTP or HTTPS with an unknown CA certificate, add
`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments.
In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag;
simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt
```

### Docker still complains about the certificate when using authentication?

When using authentication, some versions of Docker also require you to trust the
certificate at the OS level.

#### Ubuntu

```console
$ cp certs/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt
$ update-ca-certificates
```

#### Red Hat Enterprise Linux

```console
$ cp certs/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt
$ update-ca-trust
```

#### Oracle Linux

```console
$ update-ca-trust enable
```

Restart Docker for the changes to take effect.

### Windows

Open Windows Explorer, right-click the certificate, and choose
**Install certificate**.

Then, select the following options:

* Store location: local machine
* Check **place all certificates in the following store**
* Click **Browse**, and select **Trusted Root Certificate Authorities**
* Click **Finish**

[Learn more about managing TLS certificates](https://technet.microsoft.com/en-us/library/cc754841(v=ws.11).aspx#BKMK_addlocal).

After adding the CA certificate to Windows, restart Docker Desktop for Windows.
348
docs/content/about/notifications.md
Normal file
@@ -0,0 +1,348 @@
---
description: Explains how to work with registry notifications
keywords: registry, on-prem, images, tags, repository, distribution, notifications, advanced
title: Work with notifications
---

The Registry supports sending webhook notifications in response to events
happening within the registry. Notifications are sent in response to manifest
pushes and pulls and layer pushes and pulls. These actions are serialized into
events. The events are queued into a registry-internal broadcast system which
queues and dispatches events to [_Endpoints_](#endpoints).

![Workflow of registry notifications](../images/notifications.png)

## Endpoints

Notifications are sent to _endpoints_ via HTTP requests. Each configured
endpoint has isolated queues, retry configuration, and HTTP targets within each
instance of a registry. When an action happens within the registry, it is
converted into an event which is dropped into an in-memory queue. When the
event reaches the end of the queue, an HTTP request is made to the endpoint
until the request succeeds. The events are sent serially to each endpoint, but
order is not guaranteed.

## Configuration

To set up a registry instance to send notifications to endpoints, one must add
them to the configuration. A simple example follows:

```yaml
notifications:
  endpoints:
    - name: alistener
      url: https://mylistener.example.com/event
      headers:
        Authorization: [Bearer <your token, if needed>]
      timeout: 500ms
      threshold: 5
      backoff: 1s
```

The above would configure the registry with an endpoint to send events to
`https://mylistener.example.com/event`, with the header "Authorization: Bearer
<your token, if needed>". The request would time out after 500 milliseconds. If
5 failures happen consecutively, the registry backs off for 1 second before
trying again.

For details on the fields, see the [configuration documentation](configuration.md#notifications).

A properly configured endpoint should lead to a log message from the registry
upon startup:

```
INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer <your token if needed>]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry
```

## Events

Events have a well-defined JSON structure and are sent as the body of
notification requests. One or more events are sent in a structure called an
envelope. Each event has a unique ID that can be used to uniquely identify incoming
requests, if required. Along with that, an _action_ is provided with a
_target_, identifying the object mutated during the event.

The fields available in an `event` are described below.

Field | Type | Description
----- | ----- | -------------
id | string | ID provides a unique identifier for the event.
timestamp | Time | Timestamp is the time at which the event occurred.
action | string | Action indicates what action encompasses the provided event.
target | distribution.Descriptor | Target uniquely describes the target of the event.
length | int | Length in bytes of content. Same as Size field in Descriptor.
repository | string | Repository identifies the named repository.
fromRepository | string | FromRepository identifies the named repository which a blob was mounted from, if appropriate.
url | string | URL provides a direct link to the content.
tag | string | Tag identifies a tag name in tag events.
request | [RequestRecord](https://pkg.go.dev/github.com/distribution/distribution/notifications#RequestRecord) | Request covers the request that generated the event.
actor | [ActorRecord](https://pkg.go.dev/github.com/distribution/distribution/notifications#ActorRecord) | Actor specifies the agent that initiated the event. For most situations, this could be from the authorization context of the request.
source | [SourceRecord](https://pkg.go.dev/github.com/distribution/distribution/notifications#SourceRecord) | Source identifies the registry node that generated the event. Put differently, while the actor "initiates" the event, the source "generates" it.

The following is an example of a JSON event, sent in response to the pull of a
manifest:

```json
{
  "events": [
    {
      "id": "320678d8-ca14-430f-8bb6-4ca139cd83f7",
      "timestamp": "2016-03-09T14:44:26.402973972-08:00",
      "action": "pull",
      "target": {
        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
        "digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
        "size": 708,
        "length": 708,
        "repository": "hello-world",
        "url": "http://192.168.100.227:5000/v2/hello-world/manifests/sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
        "tag": "latest"
      },
      "request": {
        "id": "6df24a34-0959-4923-81ca-14f09767db19",
        "addr": "192.168.64.11:42961",
        "host": "192.168.100.227:5000",
        "method": "GET",
        "useragent": "curl/7.38.0"
      },
      "actor": {},
      "source": {
        "addr": "xtal.local:5000",
        "instanceID": "a53db899-3b4b-4a62-a067-8dd013beaca4"
      }
    }
  ]
}
```

The target struct of events which are sent when manifests and blobs are deleted
contains a subset of the data contained in Get and Put events. Specifically,
only the digest and repository are sent.

```json
{
  "target": {
    "digest": "sha256:d89e1bee20d9cb344674e213b581f14fbd8e70274ecf9d10c514bab78a307845",
    "repository": "library/test"
  }
}
```

> **Note**: As of version 2.1, the `length` field for event targets
> is being deprecated for the `size` field, bringing the target in line with
> common nomenclature. Both will continue to be set for the foreseeable
> future. Newer code should favor `size` but accept either.

## Envelope

The envelope contains one or more events, with the following JSON structure:

```json
{
  "events": [ "..." ]
}
```

While events may be sent in the same envelope, the set of events within that
envelope have no implied relationship. For example, the registry may choose to
group unrelated events and send them in the same envelope to reduce the total
number of requests.

The full package has the mediatype
"application/vnd.docker.distribution.events.v2+json", which is set on the
request coming to an endpoint.

An example of a full event may look as follows:

```http
POST /callback HTTP/1.1
Host: registrycluster.local
Authorization: Bearer <your token, if needed>
Content-Type: application/vnd.docker.distribution.events.v2+json

{
  "events": [
    {
      "id": "asdf-asdf-asdf-asdf-0",
      "timestamp": "2006-01-02T15:04:05Z",
      "action": "push",
      "target": {
        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
        "digest": "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
        "length": 1,
        "repository": "library/test",
        "url": "https://example.com/v2/library/test/manifests/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
      },
      "request": {
        "id": "asdfasdf",
        "addr": "client.local",
        "host": "registrycluster.local",
        "method": "PUT",
        "useragent": "test/0.1"
      },
      "actor": {
        "name": "test-actor"
      },
      "source": {
        "addr": "hostname.local:port"
      }
    },
    {
      "id": "asdf-asdf-asdf-asdf-1",
      "timestamp": "2006-01-02T15:04:05Z",
      "action": "push",
      "target": {
        "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
        "digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
        "length": 2,
        "repository": "library/test",
        "url": "https://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
      },
      "request": {
        "id": "asdfasdf",
        "addr": "client.local",
        "host": "registrycluster.local",
        "method": "PUT",
        "useragent": "test/0.1"
      },
      "actor": {
        "name": "test-actor"
      },
      "source": {
        "addr": "hostname.local:port"
      }
    },
    {
      "id": "asdf-asdf-asdf-asdf-2",
      "timestamp": "2006-01-02T15:04:05Z",
      "action": "push",
      "target": {
        "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
        "digest": "sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5",
        "length": 3,
        "repository": "library/test",
        "url": "https://example.com/v2/library/test/blobs/sha256:c3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
      },
      "request": {
        "id": "asdfasdf",
        "addr": "client.local",
        "host": "registrycluster.local",
        "method": "PUT",
        "useragent": "test/0.1"
      },
      "actor": {
        "name": "test-actor"
      },
      "source": {
        "addr": "hostname.local:port"
      }
    }
  ]
}
```

## Responses

The registry is fairly accepting of the response codes from endpoints. If an
endpoint responds with any 2xx or 3xx response code (after following
redirects), the message is considered to have been delivered, and is discarded.

In turn, it is recommended that endpoints be accepting of incoming requests,
as well. While the format of event envelopes is standardized by media type,
any "pickiness" about validation may cause the queue to back up on the
registry.

## Monitoring

The state of the endpoints is reported via the debug/vars HTTP interface,
usually configured at `http://localhost:5001/debug/vars`. Information such as
configuration and metrics are available by endpoint.
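For example, assuming the debug server is enabled on the default address shown
above, a quick query surfaces the endpoint state (`jq` is optional but makes
the output readable):

```console
$ curl -s http://localhost:5001/debug/vars | jq .notifications
```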
|
||||
|
||||
The following provides an example of a few endpoints that have experienced
|
||||
several failures and have since recovered:
|
||||
|
||||
```json
{
   "notifications": {
      "endpoints": [
         {
            "name": "local-5003",
            "url": "http://localhost:5003/callback",
            "Headers": {
               "Authorization": [
                  "Bearer \u003can example token\u003e"
               ]
            },
            "Timeout": 1000000000,
            "Threshold": 10,
            "Backoff": 1000000000,
            "Metrics": {
               "Pending": 76,
               "Events": 76,
               "Successes": 0,
               "Failures": 0,
               "Errors": 46,
               "Statuses": {}
            }
         },
         {
            "name": "local-8083",
            "url": "http://localhost:8083/callback",
            "Headers": null,
            "Timeout": 1000000000,
            "Threshold": 10,
            "Backoff": 1000000000,
            "Metrics": {
               "Pending": 0,
               "Events": 76,
               "Successes": 76,
               "Failures": 0,
               "Errors": 28,
               "Statuses": {
                  "202 Accepted": 76
               }
            }
         }
      ]
   }
}
```

If using notifications as part of a larger application, it is _critical_ to
monitor the size ("Pending" above) of the endpoint queues. If failures or
queue sizes are increasing, it can indicate a larger problem.
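
One way to watch those numbers, as a sketch: poll the debug endpoint with
standard tooling. This assumes the debug address shown above and that `jq` is
installed; neither the polling command nor the field selection is part of the
registry itself:

```console
$ curl -s http://localhost:5001/debug/vars \
    | jq '.notifications.endpoints[] | {name: .name, pending: .Metrics.Pending, failures: .Metrics.Failures}'
```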

The logs are also a valuable resource for monitoring problems. A failing
endpoint leads to messages similar to the following:

```none
ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying
WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off
```

The above indicates that several errors caused a backoff, and the registry
waits before retrying.

## Considerations

Currently, the queues are in-memory, so endpoints should be _reasonably
reliable_. They are designed to make a best effort to send the messages, but if
an instance is lost, messages may be dropped. If an endpoint goes down, care
should be taken to ensure that the registry instance is not terminated before
the endpoint comes back up, or messages are lost.

This can be mitigated by running endpoints in close proximity to the registry
instances. One could run an endpoint that pages to disk and then forwards
requests, to provide better durability.

The notification system is designed around a series of interchangeable _sinks_
which can be wired up to achieve interesting behavior. If this system doesn't
provide acceptable guarantees, adding a transactional `Sink` to the registry
is a possibility, although it may have an effect on request service time.
See the
[godoc](https://pkg.go.dev/github.com/distribution/distribution/notifications#Sink)
for more information.
28
docs/content/recipes/_index.md
Normal file
@ -0,0 +1,28 @@
---
description: Fun stuff to do with your registry
keywords: registry, on-prem, images, tags, repository, distribution, recipes, advanced
title: Recipes overview
---

This list of "recipes" provides end-to-end scenarios for exotic or otherwise advanced use-cases.
These recipes are not useful for most standard set-ups.

## Requirements

Before following these steps, work through the [deployment guide](../about/deploying).

At this point, it's assumed that:

* you understand Docker security requirements, and how to configure your Docker engines properly
* you have installed Docker Compose
* you have a certificate for `myregistrydomain.com` (it's HIGHLY recommended that you get one from a known CA rather than using a self-signed certificate)
* inside the current directory, you have an X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com`
* you have stopped and removed any previously running registry (typically `docker container stop registry && docker container rm -v registry`)

## The List

* [using Apache as an authenticating proxy](apache)
* [using Nginx as an authenticating proxy](nginx)
* [running a Registry on macOS](osx-setup-guide)
* [mirror the Docker Hub](mirror)
* [start registry via systemd](systemd)
217
docs/content/recipes/apache.md
Normal file
@ -0,0 +1,217 @@
---
description: Restricting access to your registry using an apache proxy
keywords: registry, on-prem, images, tags, repository, distribution, authentication, proxy, apache, httpd, TLS, recipe, advanced
title: Authenticate proxy with apache
---

## Use-case

People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline.

Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO mechanism fronting their internal http portal.

### Alternatives

If you just want authentication for your registry, and are happy maintaining users' access separately, you should really consider sticking with the native [basic auth registry feature](../about/deploying.md#native-basic-auth).

### Solution

With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry.

While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the example.

We also implement push restriction (to a limited user group) for the sake of the example. Again, you should adapt this to fit your own needs.

### Gotchas

While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself.

Furthermore, introducing an extra http layer in your communication pipeline adds complexity when deploying, maintaining, and debugging.

## Setting things up

Read again [the requirements](../#requirements).

Ready?

Run the following script:

```sh
mkdir -p auth
mkdir -p data

# This is the main apache configuration
cat <<EOF > auth/httpd.conf
LoadModule headers_module modules/mod_headers.so

LoadModule authn_file_module modules/mod_authn_file.so
LoadModule authn_core_module modules/mod_authn_core.so
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
LoadModule authz_user_module modules/mod_authz_user.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule access_compat_module modules/mod_access_compat.so

LoadModule log_config_module modules/mod_log_config.so

LoadModule ssl_module modules/mod_ssl.so

LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so

LoadModule unixd_module modules/mod_unixd.so

<IfModule ssl_module>
SSLRandomSeed startup builtin
SSLRandomSeed connect builtin
</IfModule>

<IfModule unixd_module>
User daemon
Group daemon
</IfModule>

ServerAdmin you@example.com

ErrorLog /proc/self/fd/2

LogLevel warn

<IfModule log_config_module>
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %b" common

<IfModule logio_module>
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
</IfModule>

CustomLog /proc/self/fd/1 common
</IfModule>

ServerRoot "/usr/local/apache2"

Listen 5043

<Directory />
    AllowOverride none
    Require all denied
</Directory>

<VirtualHost *:5043>

  ServerName myregistrydomain.com

  SSLEngine on
  SSLCertificateFile /usr/local/apache2/conf/domain.crt
  SSLCertificateKeyFile /usr/local/apache2/conf/domain.key

  ## SSL settings recommendation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html
  # Anti CRIME
  SSLCompression off

  # POODLE and other stuff
  SSLProtocol all -SSLv2 -SSLv3 -TLSv1

  # Secure cypher suites
  SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
  SSLHonorCipherOrder on

  Header always set "Docker-Distribution-Api-Version" "registry/2.0"
  Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0"
  RequestHeader set X-Forwarded-Proto "https"

  ProxyRequests     off
  ProxyPreserveHost on

  # no proxy for /error/ (Apache HTTPd errors messages)
  ProxyPass /error/ !

  ProxyPass        /v2 http://registry:5000/v2
  ProxyPassReverse /v2 http://registry:5000/v2

  <Location /v2>
    Order deny,allow
    Allow from all
    AuthName "Registry Authentication"
    AuthType basic
    AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd"
    AuthGroupFile "/usr/local/apache2/conf/httpd.groups"

    # Read access to authentified users
    <Limit GET HEAD>
      Require valid-user
    </Limit>

    # Write access to docker-deployer only
    <Limit POST PUT DELETE PATCH>
      Require group pusher
    </Limit>

  </Location>

</VirtualHost>
EOF

# Now, create a password file for "testuser" and "testpassword"
docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd
# Create another one for "testuserpush" and "testpasswordpush"
docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd

# Create your group file
echo "pusher: testuserpush" > auth/httpd.groups

# Copy over your certificate files
cp domain.crt auth
cp domain.key auth

# Now create your compose file

cat <<EOF > docker-compose.yml
apache:
  image: "httpd:2.4"
  hostname: myregistrydomain.com
  ports:
    - 5043:5043
  links:
    - registry:registry
  volumes:
    - `pwd`/auth:/usr/local/apache2/conf

registry:
  image: registry:2
  ports:
    - 127.0.0.1:5000:5000
  volumes:
    - `pwd`/data:/var/lib/registry
EOF
```

## Starting and stopping

Now, start your stack:

```console
$ docker-compose up -d
```

Log in with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image:

```console
$ docker login myregistrydomain.com:5043
$ docker tag ubuntu myregistrydomain.com:5043/test
$ docker push myregistrydomain.com:5043/test
```

Now, log in with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image:

```console
$ docker login myregistrydomain.com:5043
$ docker pull myregistrydomain.com:5043/test
```

Verify that the "pull-only" user can NOT push:

```console
$ docker push myregistrydomain.com:5043/test
```
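
As an optional sanity check, the group-based restriction can also be exercised
without the docker client. This is a sketch assuming the setup above; the
`/v2/` ping endpoint is part of the registry API, and the credentials are the
test users created earlier:

```console
$ # Read access works for the pull-only user
$ curl -i -u testuser:testpassword https://myregistrydomain.com:5043/v2/
$ # A write method from the same user is rejected by the proxy
$ curl -i -u testuser:testpassword -X POST https://myregistrydomain.com:5043/v2/test/blobs/uploads/
```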

146
docs/content/recipes/mirror.md
Normal file
@ -0,0 +1,146 @@
---
description: Setting-up a local mirror for Docker Hub images
keywords: registry, on-prem, images, tags, repository, distribution, mirror, Hub, recipe, advanced
title: Registry as a pull through cache
---

## Use-case

If you have multiple consumers of containers running in your environment, such as
multiple physical or virtual machines using containers, or a Kubernetes cluster,
each consumer fetches any image it doesn't have locally from the external registry.
You can run a local registry mirror and point all your consumers
there, to avoid this extra internet traffic.

### Alternatives

Alternatively, if the set of images you are using is well delimited, you can
simply pull them manually and push them to a simple, local, private registry.

Furthermore, if your images are all built in-house, not using the Hub at all and
relying entirely on your local registry is the simplest scenario.

### Gotcha

It's currently possible to mirror only one upstream registry at a time.

The URL of a pull-through registry mirror must be the root of a domain.
No path components other than an optional trailing slash (`/`) are allowed.
The following table shows examples of allowed and disallowed mirror URLs.

| URL                                     | Allowed |
| --------------------------------------- | ------- |
| `https://mirror.company.example`        | Yes     |
| `https://mirror.company.example/`       | Yes     |
| `https://mirror.company.example/foo`    | No      |
| `https://mirror.company.example#bar`    | No      |
| `https://mirror.company.example?baz=1`  | No      |

> **Note**
>
> Mirrors of Docker Hub are still subject to Docker's [fair usage policy](https://www.docker.com/pricing/resource-consumption-updates).

### Solution

The Registry can be configured as a pull through cache. In this mode a Registry
responds to all normal docker pull requests but stores all content locally.

## How does it work?

The first time you request an image from your local registry mirror, it pulls
the image from the public Docker registry and stores it locally before handing
it back to you. On subsequent requests, the local registry mirror is able to
serve the image from its own storage.

### What if the content changes on the Hub?

When a pull is attempted with a tag, the Registry checks the remote to
verify that it has the latest version of the requested content. If it doesn't,
it fetches and caches the latest content.

### What about my disk?

In environments with high churn rates, stale data can build up in the cache.
When running as a pull through cache the Registry periodically removes old
content to save disk space. Subsequent requests for removed content cause a
remote fetch and local re-caching.

To ensure the best performance and guarantee correctness, the Registry cache
should be configured to use the `filesystem` driver for storage.

## Run a Registry as a pull-through cache

The easiest way to run a registry as a pull through cache is to run the official
Registry image.
At a minimum, you need to specify `proxy.remoteurl` within `/etc/distribution/config.yml`
as described in the following subsection.

Multiple registry caches can be deployed over the same back-end. A single
registry cache ensures that concurrent requests do not pull duplicate data,
but this property does not hold true for a registry cache cluster.

> **Note**
>
> Service accounts included in the Team plan are limited to 5,000 pulls per day.
> See [Service Accounts](https://docs.docker.com/docker-hub/service-accounts/) for more details.

### Configure the cache

To configure a Registry to run as a pull through cache, the addition of a
`proxy` section is required in the config file.

To access private images on the Docker Hub, a username and password can
be supplied.

```yaml
proxy:
  remoteurl: https://registry-1.docker.io
  username: [username]
  password: [password]
  ttl: 168h
```

> **Warning**: If you specify a username and password, it's very important to
> understand that private resources that this user has access to on Docker Hub
> are made available on your mirror. **You must secure your mirror** by
> implementing authentication if you expect these resources to stay private!

> **Warning**: For the scheduler to clean up old entries, `delete` must
> be enabled in the registry configuration. See
> [Registry Configuration](../about/configuration.md) for more details.
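
For a quick local experiment, the same proxy setting can be supplied through
the registry's configuration-override environment variables instead of editing
the config file. This is a sketch, assuming the stock `registry:2` image and
anonymous access to Docker Hub:

```console
$ docker run -d --name registry-mirror -p 5000:5000 \
    -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
    registry:2
```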

### Configure the Docker daemon

Either pass the `--registry-mirror` option when starting `dockerd` manually,
or edit [`/etc/docker/daemon.json`](https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file)
and add the `registry-mirrors` key and value, to make the change persistent.

```json
{
  "registry-mirrors": ["https://mirror.company.example"]
}
```

> **Note**
>
> The mirror URL must be the root of the domain.

> **Note**
>
> Currently the Docker daemon supports only mirrors of Docker Hub.
> It is not possible to run the Docker daemon against a pull through cache with another upstream registry.

Save the file and reload Docker for the change to take effect.
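
To confirm the mirror is actually being used, a quick check along these lines
can help. It is a sketch assuming a systemd-managed daemon and a mirror
reachable at `localhost:5000`; the `_catalog` endpoint is part of the registry
API:

```console
$ sudo systemctl restart docker               # pick up the daemon.json change
$ docker pull alpine                          # first pull should go through the mirror
$ curl -s http://localhost:5000/v2/_catalog   # the cached repository should now be listed
```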

> Some log messages that appear to be errors are actually informational messages.
>
> Check the `level` field to determine whether
> the message is warning you about an error or is giving you information.
> For example, this log message is informational:
>
> ```conf
> time="2017-06-02T15:47:37Z" level=info msg="error statting local store, serving from upstream: unknown blob" go.version=go1.7.4
> ```
>
> It's telling you that the file doesn't exist yet in the local cache and is
> being pulled from upstream.

207
docs/content/recipes/nginx.md
Normal file
@ -0,0 +1,207 @@
---
description: Restricting access to your registry using a nginx proxy
keywords: registry, on-prem, images, tags, repository, distribution, nginx, proxy, authentication, TLS, recipe, advanced
title: Authenticate proxy with nginx
---

## Use-case

People already relying on a nginx proxy to authenticate their users to other
services might want to leverage it and have Registry communications tunneled
through the same pipeline.

Usually, that includes enterprise setups using LDAP/AD on the backend and an SSO
mechanism fronting their internal http portal.

### Alternatives

If you just want authentication for your registry, and are happy maintaining
users' access separately, you should really consider sticking with the native
[basic auth registry feature](../about/deploying.md#native-basic-auth).

### Solution

With the method presented here, you implement basic authentication for docker
engines in a reverse proxy that sits in front of your registry.

While we use a simple htpasswd file as an example, any other nginx
authentication backend should be fairly easy to implement once you are done with
the example.

We also implement push restriction (to a limited user group) for the sake of the
example. Again, you should adapt this to fit your own needs.

### Gotchas

While this model gives you the ability to use whatever authentication backend
you want through the secondary authentication mechanism implemented inside your
proxy, it also requires that you move TLS termination from the Registry to the
proxy itself.

> **Note**: It is not recommended to bind your registry to `localhost:5000` without
> authentication. This creates a potential loophole in your registry security.
> As a result, anyone who can log on to the server where your registry is running
> can push images without authentication.

Furthermore, introducing an extra http layer in your communication pipeline
makes it more complex to deploy, maintain, and debug. Make sure the extra
complexity is required.

For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets
the following client headers:

```none
X-Real-IP
X-Forwarded-For
X-Forwarded-Proto
```

So if you have an Nginx instance sitting behind it, remove these lines from the
example config below:

```none
proxy_set_header  Host              $http_host;   # required for docker client's sake
proxy_set_header  X-Real-IP         $remote_addr; # pass on real client's IP
proxy_set_header  X-Forwarded-For   $proxy_add_x_forwarded_for;
proxy_set_header  X-Forwarded-Proto $scheme;
```

Otherwise Nginx resets the ELB's values, and the requests are not routed
properly. For more information, see
[#970](https://github.com/distribution/distribution/issues/970).

## Setting things up

Review the [requirements](../#requirements), then follow these steps.

1. Create the required directories

   ```console
   $ mkdir -p auth data
   ```

2. Create the main nginx configuration. Paste this code block into a new file called `auth/nginx.conf`:

   ```conf
   events {
       worker_connections  1024;
   }

   http {

     upstream docker-registry {
       server registry:5000;
     }

     ## Set a variable to help us decide if we need to add the
     ## 'Docker-Distribution-Api-Version' header.
     ## The registry always sets this header.
     ## In the case of nginx performing auth, the header is unset
     ## since nginx is auth-ing before proxying.
     map $upstream_http_docker_distribution_api_version $docker_distribution_api_version {
       '' 'registry/2.0';
     }

     server {
       listen 443 ssl;
       server_name myregistrydomain.com;

       # SSL
       ssl_certificate /etc/nginx/conf.d/domain.crt;
       ssl_certificate_key /etc/nginx/conf.d/domain.key;

       # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
       ssl_protocols TLSv1.1 TLSv1.2;
       ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
       ssl_prefer_server_ciphers on;
       ssl_session_cache shared:SSL:10m;

       # disable any limits to avoid HTTP 413 for large image uploads
       client_max_body_size 0;

       # required to avoid HTTP 411: see Issue #1486 (https://github.com/moby/moby/issues/1486)
       chunked_transfer_encoding on;

       location /v2/ {
         # Do not allow connections from docker 1.5 and earlier
         # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
         if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
           return 404;
         }

         # To add basic authentication to v2 use auth_basic setting.
         auth_basic "Registry realm";
         auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd;

         ## If $docker_distribution_api_version is empty, the header is not added.
         ## See the map directive above where this variable is defined.
         add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always;

         proxy_pass                          http://docker-registry;
         proxy_set_header  Host              $http_host;   # required for docker client's sake
         proxy_set_header  X-Real-IP         $remote_addr; # pass on real client's IP
         proxy_set_header  X-Forwarded-For   $proxy_add_x_forwarded_for;
         proxy_set_header  X-Forwarded-Proto $scheme;
         proxy_read_timeout                  900;
       }
     }
   }
   ```

3. Create a password file `auth/nginx.htpasswd` for "testuser" and "testpassword".

   ```console
   $ docker run --rm --entrypoint htpasswd httpd -Bbn testuser testpassword > auth/nginx.htpasswd
   ```

   > **Note**: If you do not want to use `bcrypt`, you can omit the `-B` parameter.

4. Copy your certificate files to the `auth/` directory.

   ```console
   $ cp domain.crt auth
   $ cp domain.key auth
   ```

5. Create the compose file. Paste the following YAML into a new file called `docker-compose.yml`.

   ```yaml
   version: "3"

   services:
     nginx:
       # Note : Only nginx:alpine supports bcrypt.
       # If you don't need to use bcrypt, you can use a different tag.
       # Ref. https://github.com/nginxinc/docker-nginx/issues/29
       image: "nginx:alpine"
       ports:
         - 5043:443
       depends_on:
         - registry
       volumes:
         - ./auth:/etc/nginx/conf.d
         - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro

     registry:
       image: registry:2
       volumes:
         - ./data:/var/lib/registry
   ```

## Starting and stopping

Now, start your stack:

```console
$ docker-compose up -d
```

Log in with a "push" authorized user (using `testuser` and `testpassword`), then
tag and push your first image:

```console
$ docker login -u testuser -p testpassword myregistrydomain.com:5043
$ docker tag ubuntu myregistrydomain.com:5043/test
$ docker push myregistrydomain.com:5043/test
$ docker pull myregistrydomain.com:5043/test
```
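
As an optional check that the proxy is passing the API version header through,
the registry's ping endpoint can be queried directly. This is a sketch assuming
the stack above is running and the test credentials exist:

```console
$ curl -isu testuser:testpassword https://myregistrydomain.com:5043/v2/ | grep -i docker-distribution-api-version
Docker-Distribution-Api-Version: registry/2.0
```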

90
docs/content/recipes/osx-setup-guide.md
Normal file
@ -0,0 +1,90 @@
---
description: Explains how to run a registry on macOS
keywords: registry, on-prem, images, tags, repository, distribution, macOS, recipe, advanced
title: macOS setup guide
---

## Use-case

This is useful if you intend to run a registry server natively on macOS.

### Alternatives

You can start a VM on macOS, and deploy your registry normally as a container using Docker inside that VM.

### Solution

Using the method described here, you install and compile your own registry from the git repository and run it as a macOS launchd agent.

### Gotchas

Operating production services on macOS is out of scope for this document. Be sure you understand these aspects well before considering going to production with this.

## Setup golang on your machine

If you already have a working Go toolchain, safely skip to the next section.

If you don't, the TLDR is:

```console
$ bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
$ source ~/.gvm/scripts/gvm
$ gvm install go1.4.2
$ gvm use go1.4.2
```

If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html).

## Checkout the source tree

```console
$ mkdir -p $GOPATH/src/github.com/distribution
$ git clone https://github.com/distribution/distribution.git $GOPATH/src/github.com/distribution/distribution
$ cd $GOPATH/src/github.com/distribution/distribution
```

## Build the binary

```console
$ GOPATH=$(pwd)/Godeps/_workspace:$GOPATH make binaries
$ sudo mkdir -p /usr/local/libexec
$ sudo cp bin/registry /usr/local/libexec/registry
```
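
A quick sanity check that the build produced a working binary before wiring it
into launchd (the `registry` binary supports a `--version` flag):

```console
$ /usr/local/libexec/registry --version
```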

## Setup

Copy the registry configuration file in place:

```console
$ mkdir /Users/Shared/Registry
$ cp docs/osx/config.yml /Users/Shared/Registry/config.yml
```

## Run the registry under launchd

Copy the registry plist into place:

```console
$ plutil -lint docs/recipes/osx/com.docker.registry.plist
$ cp docs/recipes/osx/com.docker.registry.plist ~/Library/LaunchAgents/
$ chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist
```

Start the registry:

```console
$ launchctl load ~/Library/LaunchAgents/com.docker.registry.plist
```

### Restart the registry service

```console
$ launchctl stop com.docker.registry
$ launchctl start com.docker.registry
```

### Unload the registry service

```console
$ launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist
```

42
docs/content/recipes/osx/com.docker.registry.plist
Normal file
@ -0,0 +1,42 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
  <key>Label</key>
  <string>com.docker.registry</string>
  <key>KeepAlive</key>
  <true/>
  <key>StandardErrorPath</key>
  <string>/Users/Shared/Registry/registry.log</string>
  <key>StandardOutPath</key>
  <string>/Users/Shared/Registry/registry.log</string>
  <key>Program</key>
  <string>/usr/local/libexec/registry</string>
  <key>ProgramArguments</key>
  <array>
    <string>/usr/local/libexec/registry</string>
    <string>/Users/Shared/Registry/config.yml</string>
  </array>
  <key>Sockets</key>
  <dict>
    <key>http-listen-address</key>
    <dict>
      <key>SockServiceName</key>
      <string>5000</string>
      <key>SockType</key>
      <string>dgram</string>
      <key>SockFamily</key>
      <string>IPv4</string>
    </dict>
    <key>http-debug-address</key>
    <dict>
      <key>SockServiceName</key>
      <string>5001</string>
      <key>SockType</key>
      <string>dgram</string>
      <key>SockFamily</key>
      <string>IPv4</string>
    </dict>
  </dict>
</dict>
</plist>

16
docs/content/recipes/osx/config.yml
Normal file
@ -0,0 +1,16 @@
version: 0.1
log:
  level: info
  fields:
    service: registry
    environment: macbook-air
storage:
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /Users/Shared/Registry
http:
  addr: 0.0.0.0:5000
  secret: mytokensecret
  debug:
    addr: localhost:5001

108
docs/content/recipes/systemd.md
Normal file
@ -0,0 +1,108 @@
---
description: Using systemd to manage registry container
keywords: registry, on-prem, systemd, socket-activated, recipe, advanced
title: Start registry via systemd
---

## Use-case

Using systemd to manage containers can make service discovery and maintenance easier
by managing all services in the same way. Additionally, when using Podman, systemd
can start the registry with socket activation, providing additional security options:

* Run as non-root and expose on a low-numbered socket (< 1024)
* Run with `--network=none`

### Docker

When deploying the registry via Docker, a simple service file can be used to manage
the registry:

registry.service

```ini
[Unit]
Description=Distribution registry
After=docker.service
Requires=docker.service

[Service]
#TimeoutStartSec=0
Restart=always
ExecStartPre=-/usr/bin/docker stop %N
ExecStartPre=-/usr/bin/docker rm %N
ExecStart=/usr/bin/docker run --name %N \
    -v registry:/var/lib/registry \
    -p 5000:5000 \
    registry:2

[Install]
WantedBy=multi-user.target
```

In this case, the registry will store images in the named volume `registry`.
Note that the container is stopped and removed on each (re)start rather than
run with `--rm` and destroyed on stop. This is done to keep `docker logs ...`
accessible when investigating issues.
### Podman

Podman offers tighter integration with systemd than Docker does, and supports
socket activation of containers.

#### Create service file

```sh
podman create --name registry --network=none -v registry:/var/lib/registry registry:2
podman generate systemd --name --new registry > registry.service
```

#### Create socket file

registry.socket

```ini
[Unit]
Description=Distribution registry

[Socket]
ListenStream=5000

[Install]
WantedBy=sockets.target
```

### Installation

Installation can be either rootful or rootless. For Docker, rootless configurations
often include additional setup steps that are beyond the scope of this recipe, whereas
for Podman, rootless containers generally work out of the box.

#### Rootful

Run as root:

* Copy registry.service (and registry.socket if relevant) to /etc/systemd/system/
* Run `systemctl daemon-reload`
* Enable the service:
  * When using socket activation: `systemctl enable registry.socket`
  * When **not** using socket activation: `systemctl enable registry.service`
* Start the service:
  * When using socket activation: `systemctl start registry.socket`
  * When **not** using socket activation: `systemctl start registry.service`

#### Rootless

Run as the target user:

* Copy registry.service (and registry.socket if relevant) to ~/.config/systemd/user/
* Run `systemctl --user daemon-reload`
* Enable the service:
  * When using socket activation: `systemctl --user enable registry.socket`
  * When **not** using socket activation: `systemctl --user enable registry.service`
* Start the service (see the verification sketch below):
  * When using socket activation: `systemctl --user start registry.socket`
  * When **not** using socket activation: `systemctl --user start registry.service`

**Note**: To have rootless services start on boot, it may be necessary to enable linger
via `loginctl enable-linger $USER`.
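
Once the socket unit is started, socket activation can be verified with a
single request; the container is launched on the first connection. A sketch,
assuming the rootless setup above listening on port 5000:

```console
$ curl -s http://localhost:5000/v2/   # first request triggers the container start
{}
```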

12
docs/content/spec/_index.md
Normal file
@ -0,0 +1,12 @@
---
title: "Reference Overview"
description: "Explains registry JSON objects"
keywords: registry, service, images, repository, json
---

# Docker Registry Reference

* [HTTP API V2](api.md)
* [Storage Driver](../storage-drivers/_index.md)
* [Token Authentication Specification](auth/token.md)
* [Token Authentication Implementation](auth/jwt.md)

4915
docs/content/spec/api.md
Normal file
File diff suppressed because it is too large

1204
docs/content/spec/api.md.tmpl
Normal file
File diff suppressed because it is too large

12
docs/content/spec/auth/_index.md
Normal file
@ -0,0 +1,12 @@
---
title: "Distribution Registry Token Authentication"
description: "Distribution Registry v2 authentication schema"
keywords: registry, on-prem, images, tags, repository, distribution, authentication, advanced
---

# Distribution Registry v2 authentication

See the [Token Authentication Specification](token),
[Token Authentication Implementation](jwt),
[Token Scope Documentation](scope),
and [OAuth2 Token Authentication](oauth) for more information.
308
docs/content/spec/auth/jwt.md
Normal file
308
docs/content/spec/auth/jwt.md
Normal file
|
@ -0,0 +1,308 @@
|
|||
---
|
||||
title: "Token Authentication Implementation"
|
||||
description: "Describe the reference implementation of the Distribution Registry v2 authentication schema"
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced
|
||||
---
|
||||
|
||||
# Distribution Registry v2 Bearer token specification
|
||||
|
||||
This specification covers the `distribution/distribution` implementation of the
|
||||
v2 Registry's authentication schema. Specifically, it describes the JSON
|
||||
Web Token schema that `distribution/distribution` has adopted to implement the
|
||||
client-opaque Bearer token issued by an authentication service and
|
||||
understood by the registry.
|
||||
|
||||
This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32)
|
||||
|
||||
## Getting a Bearer Token
|
||||
|
||||
For this example, the client makes an HTTP GET request to the following URL:
|
||||
|
||||
```
|
||||
https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
|
||||
```
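
For illustration, the same request can be issued from the command line. This
sketch makes an anonymous request; a real deployment may require HTTP Basic
credentials via curl's `-u` option, and the response shown is a placeholder:

```console
$ curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push"
{"token": "<bearer token>"}
```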

The token server should first attempt to authenticate the client using any
authentication credentials provided with the request. As of Docker 1.8, the
registry client in the Docker Engine only supports Basic Authentication to
these token servers. If an attempt to authenticate to the token server fails,
the token server should return a `401 Unauthorized` response indicating that
the provided credentials are invalid.

Whether the token server requires authentication is up to the policy of that
access control provider. Some requests may require authentication to determine
access (such as pushing or pulling a private repository) while others may not
(such as pulling from a public repository).

After authenticating the client (which may simply be an anonymous client if
no attempt was made to authenticate), the token server must next query its
access control list to determine whether the client has the requested scope. In
this example request, if I have authenticated as user `jlhawn`, the token
server will determine what access I have to the repository `samalba/my-app`
hosted by the entity `registry.docker.io`.

Once the token server has determined what access the client has to the
resources requested in the `scope` parameter, it will take the intersection of
the set of requested actions on each resource and the set of actions that the
client has in fact been granted. If the client only has a subset of the
requested access **it must not be considered an error** as it is not the
responsibility of the token server to indicate authorization errors as part of
this workflow.

Continuing with the example request, the token server will find that the
client's set of granted access to the repository is `[pull, push]`, which when
intersected with the requested access `[pull, push]` yields an equal set. If
the granted access set was found only to be `[pull]` then the intersected set
would only be `[pull]`. If the client has no access to the repository then the
intersected set would be empty, `[]`.

It is this intersected set of access which is placed in the returned token.

The server will now construct a JSON Web Token to sign and return. A JSON Web
Token has 3 main parts:

1. Headers

   The header of a JSON Web Token is a standard JOSE header. The "typ" field
   will be "JWT" and it will also contain the "alg" which identifies the
   signing algorithm used to produce the signature. It also must have a "kid"
   field, representing the ID of the key which was used to sign the token.

   In this example, the header specifies that the object is a JSON Web Token
   signed with the key with the given ID, using the Elliptic Curve signature
   algorithm with a SHA-256 hash.

2. Claim Set

   The Claim Set is a JSON struct containing these standard registered claim
   name fields:

   <dl>
       <dt>
           <code>iss</code> (Issuer)
       </dt>
       <dd>
           The issuer of the token, typically the fqdn of the authorization
           server.
       </dd>
       <dt>
           <code>sub</code> (Subject)
       </dt>
       <dd>
           The subject of the token; the name or id of the client which
           requested it. This should be empty (`""`) if the client did not
           authenticate.
       </dd>
       <dt>
           <code>aud</code> (Audience)
       </dt>
       <dd>
           The intended audience of the token; the name or id of the service
           which will verify the token to authorize the client/subject.
       </dd>
       <dt>
           <code>exp</code> (Expiration)
       </dt>
       <dd>
           The token should only be considered valid up to this specified date
           and time.
       </dd>
       <dt>
           <code>nbf</code> (Not Before)
       </dt>
       <dd>
           The token should not be considered valid before this specified date
           and time.
       </dd>
       <dt>
           <code>iat</code> (Issued At)
       </dt>
       <dd>
           Specifies the date and time at which the Authorization server
           generated this token.
       </dd>
       <dt>
           <code>jti</code> (JWT ID)
       </dt>
       <dd>
           A unique identifier for this token. Can be used by the intended
           audience to prevent replays of the token.
       </dd>
   </dl>

   The Claim Set will also contain a private claim name unique to this
   authorization server specification:

   <dl>
       <dt>
           <code>access</code>
       </dt>
       <dd>
           An array of access entry objects with the following fields:
           <dl>
               <dt>
                   <code>type</code>
               </dt>
               <dd>
                   The type of resource hosted by the service.
               </dd>
               <dt>
                   <code>name</code>
               </dt>
               <dd>
                   The name of the resource of the given type hosted by the
                   service.
               </dd>
               <dt>
                   <code>actions</code>
               </dt>
               <dd>
                   An array of strings which give the actions authorized on
                   this resource.
               </dd>
           </dl>
       </dd>
   </dl>

   Here is an example of such a JWT Claim Set (formatted with whitespace for
   readability):

   ```
   {
       "iss": "auth.docker.com",
       "sub": "jlhawn",
       "aud": "registry.docker.com",
       "exp": 1415387315,
       "nbf": 1415387015,
       "iat": 1415387015,
       "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws",
       "access": [
           {
               "type": "repository",
               "name": "samalba/my-app",
               "actions": [
                   "pull",
                   "push"
               ]
           }
       ]
   }
   ```

3. Signature

   The authorization server will produce a JOSE header and Claim Set with no
   extraneous whitespace, i.e., the JOSE Header from above would be

   ```
   {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"}
   ```

   and the Claim Set from above would be

   ```
   {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]}
   ```

   The utf-8 representation of this JOSE header and Claim Set are then
   url-safe base64 encoded (sans trailing '=' buffer), producing:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0
   ```

   for the JOSE Header and

   ```
   eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
   ```

   for the Claim Set. These two are concatenated using a '.' character,
   yielding the string:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0
   ```

   This is then used as the payload to the `ES256` signature algorithm
   specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA)
   draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4).

   This example signature will use the following ECDSA key for the server:

   ```
   {
       "kty": "EC",
       "crv": "P-256",
       "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6",
       "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA",
       "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q",
       "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc"
   }
   ```

   A resulting signature of the above payload using this key is:

   ```
   QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
   ```

   Concatenating all of these together with a `.` character gives the
   resulting JWT:

   ```
   eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w
   ```

This can now be placed in an HTTP response and returned to the client to use to
authenticate to the audience service:

```
HTTP/1.1 200 OK
Content-Type: application/json

{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"}
```

## Using the signed token

Once the client has a token, it will try the registry request again with the
token placed in the HTTP `Authorization` header like so:

```
Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
```

This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1).

## Verifying the token

The registry must now verify the token presented by the user by inspecting the
claim set within. The registry will:

- Ensure that the issuer (`iss` claim) is an authority it trusts.
- Ensure that the registry is identified as the audience (`aud` claim).
- Check that the current time is between the `nbf` and `exp` claim times.
- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has
  not been seen before.
  - To enforce this, the registry may keep a record of `jti`s it has seen for
    up to the `exp` time of the token to prevent token replays.
- Check the `access` claim value and use the identified resources and the list
  of actions authorized to determine whether the token grants the required
  level of access for the operation the client is attempting to perform.
- Verify that the signature of the token is valid.

If any of these requirements are not met, the registry will return a
`403 Forbidden` response to indicate that the token is invalid.

**Note**: it is only at this point in the workflow that an authorization error
may occur. The token server should *not* return errors when the user does not
have the requested authorization. Instead, the returned token should indicate
whatever of the requested scope the client does have (the intersection of
requested and granted access). If the token does not supply proper
authorization then the registry will return the appropriate error.

At no point in this process should the registry need to call back to the
authorization server. The registry only needs to be supplied with the trusted
public keys to verify the token signatures.

190
docs/content/spec/auth/oauth.md
Normal file
@ -0,0 +1,190 @@
---
title: "OAuth2 Token Authentication"
description: "Specifies the Distribution Registry v2 authentication"
keywords: registry, on-prem, images, tags, repository, distribution, oauth2, advanced
---

# Distribution Registry v2 authentication using OAuth2

This document describes support for the OAuth2 protocol within the authorization
server. [RFC6749](https://tools.ietf.org/html/rfc6749) should be used as a
reference for the protocol and HTTP endpoints described here.

**Note**: Not all token servers implement OAuth2. If the request to the endpoint
returns `404` using the HTTP `POST` method, refer to
[Token Documentation](token.md) for using the HTTP `GET` method supported by all
token servers.

## Refresh token format

The format of the refresh token is completely opaque to the client and should be
determined by the authorization server. The authorization server should ensure the
token is sufficiently long and is responsible for storing any information about
long-lived tokens which may be needed for revoking. Any information stored
inside the token will not be extracted and presented by clients.

## Getting a token

POST /token

#### Headers
Content-Type: application/x-www-form-urlencoded

#### Post parameters

<dl>
    <dt>
        <code>grant_type</code>
    </dt>
    <dd>
        (REQUIRED) Type of grant used to get token. When getting a refresh token
        using credentials this type should be set to "password" and have the
        accompanying username and password parameters. Type "authorization_code"
        is reserved for future use for authenticating to an authorization server
        without having to send credentials directly from the client. When
        requesting an access token with a refresh token this should be set to
        "refresh_token".
    </dd>
    <dt>
        <code>service</code>
    </dt>
    <dd>
        (REQUIRED) The name of the service which hosts the resource to get
        access for. Refresh tokens will only be good for getting tokens for
        this service.
    </dd>
    <dt>
        <code>client_id</code>
    </dt>
    <dd>
        (REQUIRED) String identifying the client. This client_id does not need
        to be registered with the authorization server but should be set to a
        meaningful value in order to allow auditing keys created by unregistered
        clients. Accepted syntax is defined in
        <a href="https://tools.ietf.org/html/rfc6749#appendix-A.1" rel="noopener noreferrer nofollow" target="_blank">RFC6749 Appendix A.1</a>.
    </dd>
    <dt>
        <code>access_type</code>
    </dt>
    <dd>
        (OPTIONAL) Access which is being requested. If "offline" is provided
        then a refresh token will be returned. The default is "online", returning
        only a short-lived access token. If the grant type is "refresh_token"
        this will only return the same refresh token and not a new one.
    </dd>
    <dt>
        <code>scope</code>
    </dt>
    <dd>
        (OPTIONAL) The resource in question, formatted as one of the space-delimited
        entries from the <code>scope</code> parameters from the <code>WWW-Authenticate</code> header
        shown above. This query parameter should only be specified once but may
        contain multiple scopes using the scope list format defined in the scope
        grammar. If multiple <code>scope</code> values are provided from the
        <code>WWW-Authenticate</code> header, the scopes should first be
        converted to a scope list before requesting the token. The above example
        would be specified as: <code>scope=repository:samalba/my-app:push</code>.
        When requesting a refresh token the scopes may be empty, since the
        refresh token will not be limited by this scope; only the provided
        short-lived access token will have the scope limitation.
    </dd>
    <dt>
        <code>refresh_token</code>
    </dt>
    <dd>
        (OPTIONAL) The refresh token to use for authentication when grant type "refresh_token" is used.
    </dd>
    <dt>
        <code>username</code>
    </dt>
    <dd>
        (OPTIONAL) The username to use for authentication when grant type "password" is used.
    </dd>
    <dt>
        <code>password</code>
    </dt>
    <dd>
        (OPTIONAL) The password to use for authentication when grant type "password" is used.
    </dd>
</dl>
|
||||
|
||||
#### Response fields
|
||||
|
||||
<dl>
|
||||
<dt>
|
||||
<code>access_token</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(REQUIRED) An opaque <code>Bearer</code> token that clients should
|
||||
supply to subsequent requests in the <code>Authorization</code> header.
|
||||
This token should not be attempted to be parsed or understood by the
|
||||
client but treated as opaque string.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>scope</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(REQUIRED) The scope granted inside the access token. This may be the
|
||||
same scope as requested or a subset. This requirement is stronger than
|
||||
specified in <a href="https://tools.ietf.org/html/rfc6749#section-4.2.2" rel="noopener noreferrer nofollow" target="_blank">RFC6749 Section 4.2.2</a>
|
||||
by strictly requiring the scope in the return value.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>expires_in</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(REQUIRED) The duration in seconds since the token was issued that it
|
||||
will remain valid. When omitted, this defaults to 60 seconds. For
|
||||
compatibility with older clients, a token should never be returned with
|
||||
less than 60 seconds to live.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>issued_at</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(OPTIONAL) The <a href="https://www.ietf.org/rfc/rfc3339.txt" rel="noopener noreferrer nofollow" target="_blank">RFC3339</a>-serialized UTC
|
||||
standard time at which a given token was issued. If <code>issued_at</code> is omitted, the
|
||||
expiration is from when the token exchange completed.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>refresh_token</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(OPTIONAL) Token which can be used to get additional access tokens for
|
||||
the same subject with different scopes. This token should be kept secure
|
||||
by the client and only sent to the authorization server which issues
|
||||
bearer tokens. This field will only be set when `access_type=offline` is
|
||||
provided in the request.
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
#### Example getting refresh token
|
||||
|
||||
```none
|
||||
POST /token HTTP/1.1
|
||||
Host: auth.docker.io
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
|
||||
grant_type=password&username=johndoe&password=A3ddj3w&service=hub.docker.io&client_id=dockerengine&access_type=offline
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5","expires_in":900,"scope":""}
|
||||
```
|
||||
|
||||
#### Example refreshing an Access Token
|
||||
|
||||
```none
|
||||
POST /token HTTP/1.1
|
||||
Host: auth.docker.io
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
|
||||
grant_type=refresh_token&refresh_token=kas9Da81Dfa8&service=registry-1.docker.io&client_id=dockerengine&scope=repository:samalba/my-app:pull,push
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{"refresh_token":"kas9Da81Dfa8","access_token":"eyJhbGciOiJFUzI1NiIsInR5":"expires_in":900,"scope":"repository:samalba/my-app:pull,repository:samalba/my-app:push"}
|
||||
```
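
For reference, the refresh exchange above can also be driven from Go's standard library. This is a minimal client-side sketch, not part of the specification; the endpoint and form values are copied from the example, and error handling is reduced to panics:

```go
// Hypothetical client for the refresh_token grant shown above.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	form := url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {"kas9Da81Dfa8"},
		"service":       {"registry-1.docker.io"},
		"client_id":     {"dockerengine"},
		"scope":         {"repository:samalba/my-app:pull,push"},
	}
	// PostForm sends Content-Type: application/x-www-form-urlencoded.
	resp, err := http.PostForm("https://auth.docker.io/token", form)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON with access_token, expires_in, scope
}
```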
|
156
docs/content/spec/auth/scope.md
Normal file
156
docs/content/spec/auth/scope.md
Normal file
|
@ -0,0 +1,156 @@
|
|||
---
|
||||
title: "Token Scope Documentation"
|
||||
description: "Describes the scope and access fields used for registry authorization tokens"
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, advanced, access, scope
|
||||
---
|
||||
|
||||
# Distribution Registry Token Scope and Access
|
||||
|
||||
Tokens used by the registry are always restricted in what resources they may
|
||||
be used to access, where those resources may be accessed, and what actions
|
||||
may be done on those resources. Tokens always carry the context of the user
|
||||
for which the token was originally created. This document describes how these
|
||||
restrictions are represented and enforced by the authorization server and
|
||||
resource providers.
|
||||
|
||||
## Scope Components
|
||||
|
||||
### Subject (Authenticated User)
|
||||
|
||||
The subject represents the user for which a token is valid. Any actions
|
||||
performed using an access token should be considered on behalf of the subject.
|
||||
This is included in the `sub` field of the access token JWT. A refresh token should
|
||||
be limited to a single subject and only be able to give out access tokens for
|
||||
that subject.
|
||||
|
||||
### Audience (Resource Provider)
|
||||
|
||||
The audience represents a resource provider which is intended to be able to
|
||||
perform the actions specified in the access token. Any resource provider which
|
||||
does not match the audience should not use that access token. The audience is
|
||||
included in the `aud` field of the access token JWT. A refresh token should be
|
||||
limited to a single audience and only be able to give out access tokens for that
|
||||
audience.
|
||||
|
||||
### Resource Type
|
||||
|
||||
The resource type represents the type of resource which the resource name is
|
||||
intended to represent. This type may be specific to a resource provider but must
|
||||
be understood by the authorization server in order to validate the subject
|
||||
is authorized for a specific resource.
|
||||
|
||||
#### Resource Class
|
||||
|
||||
{{< hint type=warning >}}
|
||||
Resource Class is deprecated and ignored.
|
||||
`repository` and `repository(plugin)` are considered equal when authorizing a token.
|
||||
Authorization services should no longer return scopes with a resource class.
|
||||
{{< /hint >}}
|
||||
|
||||
The resource type might have a resource class which further classifies the
|
||||
resource name within the resource type. A class is not required and
|
||||
is specific to the resource type.
|
||||
|
||||
#### Example Resource Types
|
||||
|
||||
- `repository` - represents a single repository within a registry. A
|
||||
repository may represent many manifests or content blobs, but the resource type
|
||||
is considered the collection of those items. Actions which may be performed on
|
||||
a `repository` are `pull` for accessing the collection and `push` for adding to
|
||||
it. By default the `repository` type has the class of `image`.
|
||||
- `repository(plugin)` - represents a single repository of plugins within a
|
||||
registry. A plugin repository has the same content and actions as a repository.
|
||||
- `registry` - represents the entire registry. Used for administrative actions
|
||||
or lookup operations that span an entire registry.
|
||||
|
||||
### Resource Name
|
||||
|
||||
The resource name represents the name which identifies a resource for a resource
|
||||
provider. A resource is identified by this name and the provided resource type.
|
||||
An example of a resource name would be the name component of an image tag, such
|
||||
as "samalba/myapp" or "hostname/samalba/myapp".
|
||||
|
||||
### Resource Actions
|
||||
|
||||
The resource actions define the actions which the access token allows to be
|
||||
performed on the identified resource. These actions are type specific but will
|
||||
normally have actions identifying read and write access on the resource. Examples
|
||||
for the `repository` type are `pull` for read access and `push` for write
|
||||
access.
|
||||
|
||||
## Authorization Server Use
|
||||
|
||||
Each access token request may include a scope and an audience. The subject is
|
||||
always derived from the passed in credentials or refresh token. When using
|
||||
a refresh token the passed in audience must match the audience defined for
|
||||
the refresh token. The audience (resource provider) is provided using the
|
||||
`service` field. Multiple resource scopes may be provided using multiple `scope`
|
||||
fields on the `GET` request. The `POST` request only takes in a single
|
||||
`scope` field but may use a space to separate a list of multiple resource
|
||||
scopes.
|
||||
|
||||
### Resource Scope Grammar
|
||||
|
||||
```
|
||||
scope := resourcescope [ ' ' resourcescope ]*
|
||||
resourcescope := resourcetype ":" resourcename ":" action [ ',' action ]*
|
||||
resourcetype := resourcetypevalue [ '(' resourcetypevalue ')' ]
|
||||
resourcetypevalue := /[a-z0-9]+/
|
||||
resourcename := [ hostname '/' ] component [ '/' component ]*
|
||||
hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
|
||||
hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
port-number := /[0-9]+/
|
||||
action := /[a-z]*/
|
||||
component := alpha-numeric [ separator alpha-numeric ]*
|
||||
alpha-numeric := /[a-z0-9]+/
|
||||
separator := /[_.]|__|[-]*/
|
||||
```
|
||||
The full reference grammar is defined
|
||||
[here](https://pkg.go.dev/github.com/distribution/distribution/reference). Currently
|
||||
the scope name grammar is a subset of the reference grammar.
|
||||
|
||||
{{< hint type=note >}}
|
||||
Note that the `resourcename` may contain one `:` due to a possible port
|
||||
number in the hostname component of the `resourcename`, so a naive
|
||||
implementation that interprets the first three `:`-delimited tokens of a
|
||||
`scope` to be the `resourcetype`, `resourcename`, and a list of `action`
|
||||
would be insufficient.
|
||||
{{< /hint >}}
|
||||
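
A sketch of the right-to-left split the hint calls for is shown below; the `ResourceScope` type and function names are illustrative, not part of any distribution package:

```go
package main

import (
	"fmt"
	"strings"
)

// ResourceScope is an illustrative holder for one parsed resourcescope.
type ResourceScope struct {
	Type    string
	Name    string
	Actions []string
}

// parseResourceScope splits from the outside in: the first ":" ends the
// resource type and the last ":" starts the action list, so a port number
// inside the resource name is left intact.
func parseResourceScope(s string) (ResourceScope, error) {
	parts := strings.Split(s, ":")
	if len(parts) < 3 {
		return ResourceScope{}, fmt.Errorf("invalid resource scope: %q", s)
	}
	return ResourceScope{
		Type:    parts[0],
		Name:    strings.Join(parts[1:len(parts)-1], ":"),
		Actions: strings.Split(parts[len(parts)-1], ","),
	}, nil
}

func main() {
	rs, err := parseResourceScope("repository:localhost:5000/samalba/my-app:pull,push")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rs)
	// {Type:repository Name:localhost:5000/samalba/my-app Actions:[pull push]}
}
```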
|
||||
## Resource Provider Use
|
||||
|
||||
Once a resource provider has verified the authenticity of the scope through
|
||||
JWT access token verification, the resource provider must ensure that scope
|
||||
satisfies the request. The resource provider should match the given audience
|
||||
according to the name or URI the resource provider uses to identify itself. Any
|
||||
denial based on subject is not defined here and is up to the resource provider; the
|
||||
subject is mainly provided for audit logs and any other user-specific rules
|
||||
which may need to be provided but are not defined by the authorization server.
|
||||
|
||||
The resource provider must ensure that ANY resource being accessed as the
|
||||
result of a request has the appropriate access scope. Both the resource type
|
||||
and resource name must match the accessed resource and an appropriate action
|
||||
scope must be included.
|
||||
|
||||
When appropriate authorization is not provided either due to lack of scope
|
||||
or a missing token, the resource provider must return a `WWW-Authenticate` HTTP
|
||||
header with the `realm` as the authorization server, the `service` as the
|
||||
expected audience identifying string, and a `scope` field for each required
|
||||
resource scope to complete the request.
|
||||
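
As an illustration of this check, the following sketch matches a required resource type, name, and action against the granted access list; the `Access` struct mirrors one entry of the `access` claim described in the next section, but the helper itself is hypothetical:

```go
package main

import "fmt"

// Access mirrors one entry of the token's `access` claim; illustrative only.
type Access struct {
	Type    string   `json:"type"`
	Name    string   `json:"name"`
	Actions []string `json:"actions"`
}

// authorized reports whether the granted access list covers the required
// resource type, resource name, and action.
func authorized(granted []Access, typ, name, action string) bool {
	for _, g := range granted {
		if g.Type != typ || g.Name != name {
			continue
		}
		for _, a := range g.Actions {
			if a == action {
				return true
			}
		}
	}
	return false
}

func main() {
	granted := []Access{{
		Type:    "repository",
		Name:    "samalba/my-app",
		Actions: []string{"pull"},
	}}
	fmt.Println(authorized(granted, "repository", "samalba/my-app", "pull")) // true
	fmt.Println(authorized(granted, "repository", "samalba/my-app", "push")) // false
}
```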
|
||||
## JWT Access Tokens
|
||||
|
||||
Each JWT access token may only have a single subject and audience but multiple
|
||||
resource scopes. The subject and audience are put into standard JWT fields
|
||||
`sub` and `aud`. The resource scope is put into the `access` field. The
|
||||
structure of the access field can be seen in the
|
||||
[jwt documentation](jwt.md).
|
||||
|
||||
## Refresh Tokens
|
||||
|
||||
A refresh token must be defined for a single subject and audience. Further
|
||||
restricting scope to specific type, name, and actions combinations should be
|
||||
done by fetching an access token using the refresh token. Since the refresh
|
||||
token is not scoped to specific resources for an audience, extra care should
|
||||
be taken to only use the refresh token to negotiate new access tokens directly
|
||||
with the authorization server, and never with a resource provider.
|
247
docs/content/spec/auth/token.md
Normal file
247
docs/content/spec/auth/token.md
Normal file
|
@ -0,0 +1,247 @@
|
|||
---
|
||||
title: "Token Authentication Specification"
|
||||
description: "Specifies the Distribution Registry v2 authentication"
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced
|
||||
---
|
||||
|
||||
# Distribution Registry v2 authentication via central service
|
||||
|
||||
This document outlines the v2 Distribution registry authentication scheme:
|
||||
|
||||

|
||||
|
||||
1. Attempt to begin a push/pull operation with the registry.
|
||||
2. If the registry requires authorization it will return a `401 Unauthorized`
|
||||
HTTP response with information on how to authenticate.
|
||||
3. The registry client makes a request to the authorization service for a
|
||||
Bearer token.
|
||||
4. The authorization service returns an opaque Bearer token representing the
|
||||
client's authorized access.
|
||||
5. The client retries the original request with the Bearer token embedded in
|
||||
the request's Authorization header.
|
||||
6. The Registry authorizes the client by validating the Bearer token and the
|
||||
claim set embedded within it and begins the push/pull session as usual.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Registry clients which can understand and respond to token auth challenges
|
||||
returned by the resource server.
|
||||
- An authorization server capable of managing access controls to the
|
||||
resources hosted by any given service (such as repositories in a Distribution
|
||||
Registry).
|
||||
- A Distribution Registry capable of trusting the authorization server to sign tokens
|
||||
which clients can use for authorization and the ability to verify these
|
||||
tokens for single use or for use during a sufficiently short period of time.
|
||||
|
||||
## Authorization Server Endpoint Descriptions
|
||||
|
||||
The described server is meant to serve as a standalone access control manager
|
||||
for resources hosted by other services which wish to authenticate and manage
|
||||
authorizations using a separate access control manager.
|
||||
|
||||
A service like this is used by public and private registries to authenticate
|
||||
clients and verify their authorization to image repositories.
|
||||
|
||||
## How to authenticate
|
||||
|
||||
Registry V1 clients first contact the index to initiate a push or pull. Under
|
||||
the Registry V2 workflow, clients should contact the registry first. If the
|
||||
registry server requires authentication it will return a `401 Unauthorized`
|
||||
response with a `WWW-Authenticate` header detailing how to authenticate to this
|
||||
registry.
|
||||
|
||||
For example, say I (username `jlhawn`) am attempting to push an image to the
|
||||
repository `samalba/my-app`. For the registry to authorize this, I will need
|
||||
`push` access to the `samalba/my-app` repository. The registry will first
|
||||
return this response:
|
||||
|
||||
```
|
||||
HTTP/1.1 401 Unauthorized
|
||||
Content-Type: application/json
|
||||
Docker-Distribution-Api-Version: registry/2.0
|
||||
Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
|
||||
Date: Thu, 10 Sep 2015 19:32:31 GMT
|
||||
Content-Length: 235
|
||||
Strict-Transport-Security: max-age=31536000
|
||||
|
||||
{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]}
|
||||
```
|
||||
|
||||
Note the HTTP Response Header indicating the auth challenge:
|
||||
|
||||
```
|
||||
Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
|
||||
```
|
||||
|
||||
This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3)
|
||||
|
||||
This challenge indicates that the registry requires a token issued by the
|
||||
specified token server and that the request the client is attempting will
|
||||
need to include sufficient access entries in its claim set. To respond to this
|
||||
challenge, the client will need to make a `GET` request to the URL
|
||||
`https://auth.docker.io/token` using the `service` and `scope` values from the
|
||||
`WWW-Authenticate` header.
|
||||
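
As a sketch of that flow, the snippet below pulls the `realm`, `service`, and `scope` values out of the challenge and assembles the token request URL; the quick-and-dirty regular expression is illustrative only (a production client should use a real RFC 7235 challenge parser):

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

func main() {
	challenge := `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"`

	// Collect the key="value" pairs out of the challenge.
	params := map[string]string{}
	for _, m := range regexp.MustCompile(`(\w+)="([^"]*)"`).FindAllStringSubmatch(challenge, -1) {
		params[m[1]] = m[2]
	}

	// Build the GET request URL against the realm, carrying over the
	// service and scope values verbatim.
	q := url.Values{}
	q.Set("service", params["service"])
	q.Set("scope", params["scope"])
	fmt.Println(params["realm"] + "?" + q.Encode())
	// https://auth.docker.io/token?scope=repository%3Asamalba%2Fmy-app%3Apull%2Cpush&service=registry.docker.io
}
```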
|
||||
## Requesting a Token
|
||||
|
||||
This section defines how to get a bearer token, and optionally a refresh token, using the token endpoint.
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
<dl>
|
||||
<dt>
|
||||
<code>service</code>
|
||||
</dt>
|
||||
<dd>
|
||||
The name of the service which hosts the resource.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>offline_token</code>
|
||||
</dt>
|
||||
<dd>
|
||||
Whether to return a refresh token along with the bearer token. A refresh
|
||||
token is capable of getting additional bearer tokens for the same
|
||||
subject with different scopes. The refresh token does not have an
|
||||
expiration and should be considered completely opaque to the client.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>client_id</code>
|
||||
</dt>
|
||||
<dd>
|
||||
String identifying the client. This client_id does not need
|
||||
to be registered with the authorization server but should be set to a
|
||||
meaningful value in order to allow auditing keys created by unregistered
|
||||
clients. Accepted syntax is defined in
|
||||
<a href="https://tools.ietf.org/html/rfc6749#appendix-A.1" rel="noopener noreferrer nofollow" target="_blank">RFC6749 Appendix A.1</a>.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>scope</code>
|
||||
</dt>
|
||||
<dd>
|
||||
The resource in question, formatted as one of the space-delimited
|
||||
entries from the <code>scope</code> parameters from the <code>WWW-Authenticate</code> header
|
||||
shown above. This query parameter should be specified multiple times if
|
||||
there is more than one <code>scope</code> entry from the <code>WWW-Authenticate</code>
|
||||
header. The above example would be specified as:
|
||||
<code>scope=repository:samalba/my-app:push</code>. The scope field may
|
||||
be empty to request a refresh token without providing any resource
|
||||
permissions to the returned bearer token.
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
#### Token Response Fields
|
||||
|
||||
<dl>
|
||||
<dt>
|
||||
<code>token</code>
|
||||
</dt>
|
||||
<dd>
|
||||
An opaque <code>Bearer</code> token that clients should supply to subsequent
|
||||
requests in the <code>Authorization</code> header.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>access_token</code>
|
||||
</dt>
|
||||
<dd>
|
||||
For compatibility with OAuth 2.0, we will also accept <code>token</code> under the name
|
||||
<code>access_token</code>. At least one of these fields <b>must</b> be specified, but
|
||||
both may also appear (for compatibility with older clients). When both are specified,
|
||||
they should be equivalent; if they differ the client's choice is undefined.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>expires_in</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(Optional) The duration in seconds since the token was issued that it
|
||||
will remain valid. When omitted, this defaults to 60 seconds. For
|
||||
compatibility with older clients, a token should never be returned with
|
||||
less than 60 seconds to live.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>issued_at</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(Optional) The <a href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339</a>-serialized UTC
|
||||
standard time at which a given token was issued. If <code>issued_at</code> is omitted, the
|
||||
expiration is from when the token exchange completed.
|
||||
</dd>
|
||||
<dt>
|
||||
<code>refresh_token</code>
|
||||
</dt>
|
||||
<dd>
|
||||
(Optional) Token which can be used to get additional access tokens for
|
||||
the same subject with different scopes. This token should be kept secure
|
||||
by the client and only sent to the authorization server which issues
|
||||
bearer tokens. This field will only be set when `offline_token=true` is
|
||||
provided in the request.
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
#### Example
|
||||
|
||||
For this example, the client makes an HTTP GET request to the following URL:
|
||||
|
||||
```
|
||||
https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
|
||||
```
|
||||
|
||||
The token server should first attempt to authenticate the client using any
|
||||
authentication credentials provided with the request. From Docker 1.11 the
|
||||
Docker engine supports both Basic Authentication and [OAuth2](oauth.md) for
|
||||
getting tokens. In Docker 1.10 and earlier, the registry client in the Docker Engine
|
||||
only supports Basic Authentication. If an attempt to authenticate to the token
|
||||
server fails, the token server should return a `401 Unauthorized` response
|
||||
indicating that the provided credentials are invalid.
|
||||
|
||||
Whether the token server requires authentication is up to the policy of that
|
||||
access control provider. Some requests may require authentication to determine
|
||||
access (such as pushing or pulling a private repository) while others may not
|
||||
(such as pulling from a public repository).
|
||||
|
||||
After authenticating the client (which may simply be an anonymous client if
|
||||
no attempt was made to authenticate), the token server must next query its
|
||||
access control list to determine whether the client has the requested scope. In
|
||||
this example request, if I have authenticated as user `jlhawn`, the token
|
||||
server will determine what access I have to the repository `samalba/my-app`
|
||||
hosted by the entity `registry.docker.io`.
|
||||
|
||||
Once the token server has determined what access the client has to the
|
||||
resources requested in the `scope` parameter, it will take the intersection of
|
||||
the set of requested actions on each resource and the set of actions that the
|
||||
client has in fact been granted. If the client only has a subset of the
|
||||
requested access **it must not be considered an error** as it is not the
|
||||
responsibility of the token server to indicate authorization errors as part of
|
||||
this workflow.
|
||||
|
||||
Continuing with the example request, the token server will find that the
|
||||
client's set of granted access to the repository is `[pull, push]` which when
|
||||
intersected with the requested access `[pull, push]` yields an equal set. If
|
||||
the granted access set was found only to be `[pull]` then the intersected set
|
||||
would only be `[pull]`. If the client has no access to the repository then the
|
||||
intersected set would be empty, `[]`.
|
||||
|
||||
It is this intersected set of access which is placed in the returned token.
|
||||
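
The intersection itself is simple set arithmetic; a minimal sketch with illustrative names:

```go
package main

import "fmt"

// intersect keeps the requested actions that were actually granted,
// preserving the requested order.
func intersect(requested, granted []string) []string {
	grantedSet := make(map[string]bool, len(granted))
	for _, a := range granted {
		grantedSet[a] = true
	}
	var out []string
	for _, a := range requested {
		if grantedSet[a] {
			out = append(out, a)
		}
	}
	return out
}

func main() {
	fmt.Println(intersect([]string{"pull", "push"}, []string{"pull", "push"})) // [pull push]
	fmt.Println(intersect([]string{"pull", "push"}, []string{"pull"}))         // [pull]
	fmt.Println(intersect([]string{"pull", "push"}, nil))                      // []
}
```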
|
||||
The server then constructs an implementation-specific token with this
|
||||
intersected set of access, and returns it to the Docker client to use to
|
||||
authenticate to the audience service (within the indicated window of time):
|
||||
|
||||
```
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600,"issued_at": "2009-11-10T23:00:00Z"}
|
||||
```
|
||||
|
||||
|
||||
## Using the Bearer token
|
||||
|
||||
Once the client has a token, it will try the registry request again with the
|
||||
token placed in the HTTP `Authorization` header like so:
|
||||
|
||||
```
|
||||
Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw
|
||||
```
|
||||
|
||||
This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1)
|
39
docs/content/spec/deprecated-schema-v1.md
Normal file
39
docs/content/spec/deprecated-schema-v1.md
Normal file
|
@ -0,0 +1,39 @@
|
|||
---
|
||||
title: Image manifest version 2, schema 1
|
||||
description: Update deprecated schema v1 images
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, api, advanced, manifest
|
||||
---
|
||||
|
||||
With the release of image manifest version 2, schema 2, image manifest version
|
||||
2, schema 1 has been deprecated. This could lead to compatibility and
|
||||
vulnerability issues in images that haven't been updated to image manifest
|
||||
version 2, schema 2.
|
||||
|
||||
This page contains information on how to update from image manifest version 2,
|
||||
schema 1. However, these instructions will not ensure your new image will run
|
||||
successfully. There may be several other issues to troubleshoot that are
|
||||
associated with the deprecated image manifest that will block your image from
|
||||
running successfully. A list of possible methods to help update your image is
|
||||
also included below.
|
||||
|
||||
## Update to image manifest version 2, schema 2
|
||||
|
||||
One way to upgrade an image from image manifest version 2, schema 1 to
|
||||
schema 2 is to `docker pull` the image and then `docker push` the image with a
|
||||
current version of Docker. Doing so will automatically convert the image to use
|
||||
the latest image manifest specification.
|
||||
|
||||
Converting an image to image manifest version 2, schema 2 converts the
|
||||
manifest format, but does not update the contents within the image. Images
|
||||
using manifest version 2, schema 1 may contain unpatched vulnerabilities. We
|
||||
recommend looking for an alternative image or rebuilding it.
|
||||
|
||||
## Update FROM statement
|
||||
|
||||
You can rebuild the image by updating the `FROM` statement in your
|
||||
`Dockerfile`. If your image manifest is out-of-date, there is a chance the
|
||||
image pulled from your `FROM` statement in your `Dockerfile` is also
|
||||
out-of-date. See the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/#from)
|
||||
and the [Dockerfile best practices guide](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/)
|
||||
for more information on how to update the `FROM` statement in your
|
||||
`Dockerfile`.
|
30
docs/content/spec/implementations.md
Normal file
30
docs/content/spec/implementations.md
Normal file
|
@ -0,0 +1,30 @@
|
|||
---
|
||||
draft: true
|
||||
---
|
||||
|
||||
# Distribution API Implementations
|
||||
|
||||
This is a list of known implementations of the Distribution API spec.
|
||||
|
||||
## [CNCF Distribution Registry](https://github.com/distribution/distribution)
|
||||
|
||||
CNCF distribution is the reference implementation of the distribution API
|
||||
specification. It aims to fully implement the entire specification.
|
||||
|
||||
### Releases
|
||||
#### 2.0.1 (_in development_)
|
||||
Implements API 2.0.1
|
||||
|
||||
_Known Issues_
|
||||
- No resumable push support
|
||||
- Content ranges ignored
|
||||
- Blob upload status will always return a starting range of 0
|
||||
|
||||
#### 2.0.0
|
||||
Implements API 2.0.0
|
||||
|
||||
_Known Issues_
|
||||
- No resumable push support
|
||||
- No PATCH implementation for blob upload
|
||||
- Content ranges ignored
|
||||
|
90
docs/content/spec/json.md
Normal file
90
docs/content/spec/json.md
Normal file
|
@ -0,0 +1,90 @@
|
|||
---
|
||||
draft: true
|
||||
title: "CNCF Distribution JSON Canonicalization"
|
||||
description: "Explains registry JSON objects"
|
||||
keywords: ["registry, service, images, repository, json"]
|
||||
---
|
||||
|
||||
|
||||
|
||||
# CNCF Distribution JSON Canonicalization
|
||||
|
||||
To provide consistent content hashing of JSON objects throughout CNCF
|
||||
Distribution APIs, the specification defines a canonical JSON format. Adopting
|
||||
such a canonicalization also aids in caching JSON responses.
|
||||
|
||||
Note that protocols should not be designed to depend on identical JSON being
|
||||
generated across different versions or clients. The canonicalization rules are
|
||||
merely useful for caching and consistency.
|
||||
|
||||
## Rules
|
||||
|
||||
Compliant JSON should conform to the following rules:
|
||||
|
||||
1. All generated JSON should comply with [RFC
|
||||
7159](http://www.ietf.org/rfc/rfc7159.txt).
|
||||
2. Resulting "JSON text" shall always be encoded in UTF-8.
|
||||
3. Unless a canonical key order is defined for a particular schema, object
|
||||
keys shall always appear in lexically sorted order.
|
||||
4. All whitespace between tokens should be removed.
|
||||
5. No "trailing commas" are allowed in object or array definitions.
|
||||
6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e".
|
||||
Ampersand "&" is escaped to "\u0026".
|
||||
|
||||
## Examples
|
||||
|
||||
The following is a simple example of a canonicalized JSON string:
|
||||
|
||||
```json
|
||||
{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]}
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
### Other Canonicalizations
|
||||
|
||||
The OLPC project specifies [Canonical
|
||||
JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in
|
||||
[TUF](http://theupdateframework.com/), which may be used with other
|
||||
distribution-related protocols, this alternative format has been proposed in
|
||||
case the original source changes. Specifications complying with either this
|
||||
specification or an alternative should explicitly call out the
|
||||
canonicalization format. Except for key ordering, this specification is mostly
|
||||
compatible.
|
||||
|
||||
### Go
|
||||
|
||||
In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library
|
||||
will emit canonical JSON by default. Simply using `json.Marshal` will suffice
|
||||
in most cases:
|
||||
|
||||
```go
|
||||
incoming := map[string]interface{}{
|
||||
"asdf": 1,
|
||||
"qwer": []interface{}{},
|
||||
"zxcv": []interface{}{
|
||||
map[string]interface{}{},
|
||||
true,
|
||||
int(1e9),
|
||||
"tyui",
|
||||
},
|
||||
}
|
||||
|
||||
canonical, err := json.Marshal(incoming)
|
||||
if err != nil {
|
||||
// ... handle error
|
||||
}
|
||||
```
|
||||
|
||||
To apply canonical JSON format spacing to an existing serialized JSON buffer, one
|
||||
can use
|
||||
[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65)
|
||||
with the following arguments:
|
||||
|
||||
```go
|
||||
incoming := getBytes()
|
||||
var canonical bytes.Buffer
|
||||
if err := json.Indent(&canonical, incoming, "", ""); err != nil {
|
||||
// ... handle error
|
||||
}
|
||||
```
|
294
docs/content/spec/manifest-v2-2.md
Normal file
294
docs/content/spec/manifest-v2-2.md
Normal file
|
@ -0,0 +1,294 @@
|
|||
---
|
||||
title: "Image Manifest V 2, Schema 2"
|
||||
description: "image manifest for the Registry."
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, api, advanced, manifest
|
||||
---
|
||||
|
||||
# Image Manifest Version 2, Schema 2
|
||||
|
||||
This document outlines the format of the V2 image manifest, schema version 2.
|
||||
The original (and provisional) image manifest for V2 (schema 1), was introduced
|
||||
in the Docker daemon in the [v1.3.0
|
||||
release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453)
|
||||
and is now deprecated.
|
||||
|
||||
This second schema version has two primary goals. The first is to allow
|
||||
multi-architecture images, through a "fat manifest" which references image
|
||||
manifests for platform-specific versions of an image. The second is to
|
||||
move the Docker engine towards content-addressable images, by supporting
|
||||
an image model where the image's configuration can be hashed to generate
|
||||
an ID for the image.
|
||||
|
||||
# Media Types
|
||||
|
||||
The following media types are used by the manifest formats described here, and
|
||||
the resources they reference:
|
||||
|
||||
- `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2)
|
||||
- `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest"
|
||||
- `application/vnd.docker.container.image.v1+json`: Container config JSON
|
||||
- `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar
|
||||
- `application/vnd.docker.image.rootfs.foreign.diff.tar.gzip`: "Layer", as a gzipped tar that should never be pushed
|
||||
- `application/vnd.docker.plugin.v1+json`: Plugin config JSON
|
||||
|
||||
## Manifest List
|
||||
|
||||
The manifest list is the "fat manifest" which points to specific image manifests
|
||||
for one or more platforms. Its use is optional, and relatively few images will
|
||||
use one of these manifests. A client will distinguish a manifest list from an
|
||||
image manifest based on the Content-Type returned in the HTTP response.
|
||||
|
||||
## *Manifest List* Field Descriptions
|
||||
|
||||
- **`schemaVersion`** *int*
|
||||
|
||||
This field specifies the image manifest schema version as an integer. This
|
||||
schema uses the version `2`.
|
||||
|
||||
- **`mediaType`** *string*
|
||||
|
||||
The MIME type of the manifest list. This should be set to
|
||||
`application/vnd.docker.distribution.manifest.list.v2+json`.
|
||||
|
||||
- **`manifests`** *array*
|
||||
|
||||
The manifests field contains a list of manifests for specific platforms.
|
||||
|
||||
Fields of an object in the manifests list are:
|
||||
|
||||
- **`mediaType`** *string*
|
||||
|
||||
The MIME type of the referenced object. This will generally be
|
||||
`application/vnd.docker.distribution.manifest.v2+json`.
|
||||
|
||||
- **`size`** *int*
|
||||
|
||||
The size in bytes of the object. This field exists so that a client
|
||||
will have an expected size for the content before validating. If the
|
||||
length of the retrieved content does not match the specified length,
|
||||
the content should not be trusted.
|
||||
|
||||
- **`digest`** *string*
|
||||
|
||||
The digest of the content, as defined by the
|
||||
[Registry V2 HTTP API Specification](api.md#digest-parameter).
|
||||
|
||||
- **`platform`** *object*
|
||||
|
||||
The platform object describes the platform which the image in the
|
||||
manifest runs on. A full list of valid operating system and architecture
|
||||
values can be found in the [Go language documentation for `$GOOS` and
|
||||
`$GOARCH`](https://golang.org/doc/install/source#environment)
|
||||
|
||||
- **`architecture`** *string*
|
||||
|
||||
The architecture field specifies the CPU architecture, for example
|
||||
`amd64` or `ppc64le`.
|
||||
|
||||
- **`os`** *string*
|
||||
|
||||
The os field specifies the operating system, for example
|
||||
`linux` or `windows`.
|
||||
|
||||
- **`os.version`** *string*
|
||||
|
||||
The optional os.version field specifies the operating system version,
|
||||
for example `10.0.10586`.
|
||||
|
||||
- **`os.features`** *array*
|
||||
|
||||
The optional os.features field specifies an array of strings,
|
||||
each listing a required OS feature (for example on Windows
|
||||
`win32k`).
|
||||
|
||||
- **`variant`** *string*
|
||||
|
||||
The optional variant field specifies a variant of the CPU, for
|
||||
example `v6` to specify a particular CPU variant of the ARM CPU.
|
||||
|
||||
- **`features`** *array*
|
||||
|
||||
The optional features field specifies an array of strings, each
|
||||
listing a required CPU feature (for example `sse4` or `aes`).
|
||||
|
||||
## Example Manifest List
|
||||
|
||||
Example showing a simple manifest list pointing to image manifests for two platforms:
|
||||
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
|
||||
"size": 7143,
|
||||
"platform": {
|
||||
"architecture": "ppc64le",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
|
||||
"size": 7682,
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux",
|
||||
"features": [
|
||||
"sse4"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
# Image Manifest
|
||||
|
||||
The image manifest provides a configuration and a set of layers for a container
|
||||
image. It's the direct replacement for the schema-1 manifest.
|
||||
|
||||
## *Image Manifest* Field Descriptions
|
||||
|
||||
- **`schemaVersion`** *int*
|
||||
|
||||
This field specifies the image manifest schema version as an integer. This
|
||||
schema uses version `2`.
|
||||
|
||||
- **`mediaType`** *string*
|
||||
|
||||
The MIME type of the manifest. This should be set to
|
||||
`application/vnd.docker.distribution.manifest.v2+json`.
|
||||
|
||||
- **`config`** *object*
|
||||
|
||||
The config field references a configuration object for a container, by
|
||||
digest. This configuration item is a JSON blob that the runtime uses
|
||||
to set up the container. This new schema uses a tweaked version
|
||||
of this configuration to allow image content-addressability on the
|
||||
daemon side.
|
||||
|
||||
Fields of a config object are:
|
||||
|
||||
- **`mediaType`** *string*
|
||||
|
||||
The MIME type of the referenced object. This should generally be
|
||||
`application/vnd.docker.container.image.v1+json`.
|
||||
|
||||
- **`size`** *int*
|
||||
|
||||
The size in bytes of the object. This field exists so that a client
|
||||
will have an expected size for the content before validating. If the
|
||||
length of the retrieved content does not match the specified length,
|
||||
the content should not be trusted.
|
||||
|
||||
- **`digest`** *string*
|
||||
|
||||
The digest of the content, as defined by the
|
||||
[Registry V2 HTTP API Specification](api.md#digest-parameter).
|
||||
|
||||
- **`layers`** *array*
|
||||
|
||||
The layer list is ordered starting from the base image (opposite order of schema1).
|
||||
|
||||
Fields of an item in the layers list are:
|
||||
|
||||
- **`mediaType`** *string*
|
||||
|
||||
The MIME type of the referenced object. This should
|
||||
generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`.
|
||||
Layers of type
|
||||
`application/vnd.docker.image.rootfs.foreign.diff.tar.gzip` may be
|
||||
pulled from a remote location but they should never be pushed.
|
||||
|
||||
- **`size`** *int*
|
||||
|
||||
The size in bytes of the object. This field exists so that a client
|
||||
will have an expected size for the content before validating. If the
|
||||
length of the retrieved content does not match the specified length,
|
||||
the content should not be trusted.
|
||||
|
||||
- **`digest`** *string*
|
||||
|
||||
The digest of the content, as defined by the
|
||||
[Registry V2 HTTP API Specification](api.md#digest-parameter).
|
||||
|
||||
- **`urls`** *array*
|
||||
|
||||
Provides a list of URLs from which the content may be fetched. Content
|
||||
must be verified against the `digest` and `size`. This field is
|
||||
optional and uncommon.
|
||||
|
||||
## Example Image Manifest
|
||||
|
||||
Example showing an image manifest:
|
||||
|
||||
```json
|
||||
{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
|
||||
"size": 7023
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
|
||||
"size": 32654
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b",
|
||||
"size": 16724
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736",
|
||||
"size": 73109
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
# Backward compatibility
|
||||
|
||||
The registry will continue to accept uploads of manifests in both the old and
|
||||
new formats.
|
||||
|
||||
When pushing images, clients which support the new manifest format should first
|
||||
construct a manifest in the new format. If uploading this manifest fails,
|
||||
presumably because the registry only supports the old format, the client may
|
||||
fall back to uploading a manifest in the old format.
|
||||
|
||||
When pulling images, clients indicate support for this new version of the
|
||||
manifest format by sending the
|
||||
`application/vnd.docker.distribution.manifest.v2+json` and
|
||||
`application/vnd.docker.distribution.manifest.list.v2+json` media types in an
|
||||
`Accept` header when making a request to the `manifests` endpoint. Updated
|
||||
clients should check the `Content-Type` header to see whether the manifest
|
||||
returned from the endpoint is in the old format, or is an image manifest or
|
||||
manifest list in the new format.
|
||||
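
A sketch of such a pull request is shown below; the registry host and repository are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder registry host and repository.
	req, err := http.NewRequest("GET",
		"https://registry.example.com/v2/samalba/my-app/manifests/latest", nil)
	if err != nil {
		panic(err)
	}
	// Advertise support for both new formats; the registry may still
	// answer with the old schema 1 media type.
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	req.Header.Add("Accept", "application/vnd.docker.distribution.manifest.list.v2+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The Content-Type header tells the client which format came back.
	fmt.Println(resp.Header.Get("Content-Type"))
}
```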
|
||||
If the manifest being requested uses the new format, and the appropriate media
|
||||
type is not present in an `Accept` header, the registry will assume that the
|
||||
client cannot handle the manifest as-is, and rewrite it on the fly into the old
|
||||
format. If the object that would otherwise be returned is a manifest list, the
|
||||
registry will look up the appropriate manifest for the amd64 platform and
|
||||
linux OS, rewrite that manifest into the old format if necessary, and return
|
||||
the result to the client. If no suitable manifest is found in the manifest
|
||||
list, the registry will return a 404 error.
|
||||
|
||||
One of the challenges in rewriting manifests to the old format is that the old
|
||||
format involves an image configuration for each layer in the manifest, but the
|
||||
new format only provides one image configuration. To work around this, the
|
||||
registry will create synthetic image configurations for all layers except the
|
||||
top layer. These image configurations will not result in runnable images on
|
||||
their own, but only serve to fill in the parent chain in a compatible way.
|
||||
The IDs in these synthetic configurations will be derived from hashes of their
|
||||
respective blobs. The registry will create these configurations and their IDs
|
||||
using the same scheme as Docker 1.10 when it creates a legacy manifest to push
|
||||
to a registry which doesn't support the new format.
|
69
docs/content/storage-drivers/_index.md
Normal file
69
docs/content/storage-drivers/_index.md
Normal file
|
@ -0,0 +1,69 @@
|
|||
---
|
||||
description: Explains how to use storage drivers
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, storage drivers, advanced
|
||||
title: Registry storage driver
|
||||
---
|
||||
|
||||
This document describes the registry storage driver model and implementation, and explains how to contribute new storage drivers.
|
||||
|
||||
## Provided drivers
|
||||
|
||||
This storage driver package comes bundled with several drivers:
|
||||
|
||||
- [inmemory](inmemory): A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
|
||||
- [filesystem](filesystem): A local storage driver configured to use a directory tree in the local filesystem.
|
||||
- [s3](s3): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
|
||||
- [azure](azure): A driver storing objects in [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/).
|
||||
- [gcs](gcs): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket.
|
||||
- oss: *NO LONGER SUPPORTED*
|
||||
- swift: *NO LONGER SUPPORTED*
|
||||
|
||||
## Storage driver API
|
||||
|
||||
The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
|
||||
|
||||
Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
|
||||
|
||||
Storage drivers are intended to be written in Go, providing compile-time
|
||||
validation of the `storagedriver.StorageDriver` interface.
|
||||
|
||||
## Driver selection and configuration
|
||||
|
||||
The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](https://golang.org/pkg/database/sql/#Register) and [Open](https://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](https://golang.org/pkg/database/sql) package.
|
||||
|
||||
Storage driver factories may be registered by name using the
|
||||
`factory.Register` method, and then later invoked by calling `factory.Create`
|
||||
with a driver name and parameters map. If no such storage driver can be found,
|
||||
`factory.Create` returns an `InvalidStorageDriverError`.
|
||||
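
A minimal sketch of that flow, written against the v2-era factory API (`factory.Create(name, parameters)`); newer versions of the module may use a different import path or add a `context.Context` argument:

```go
package main

import (
	"log"

	"github.com/docker/distribution/registry/storage/driver/factory"
	// Importing a driver package runs its init(), which calls
	// factory.Register with the driver name.
	_ "github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	driver, err := factory.Create("filesystem", map[string]interface{}{
		"rootdirectory": "/var/lib/registry",
	})
	if err != nil {
		log.Fatal(err) // e.g. an InvalidStorageDriverError for unknown names
	}
	_ = driver // ready for use by the registry storage layer
}
```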
|
||||
## Driver contribution
|
||||
|
||||
New storage drivers are not currently being accepted.
|
||||
See <https://github.com/distribution/distribution/issues/3988> for discussion.
|
||||
|
||||
There are forks of this repo that implement custom storage drivers.
|
||||
These are not supported by the OCI distribution project.
|
||||
The known forks are:
|
||||
|
||||
- Storj DCS: <https://github.com/storj/docker-registry>
|
||||
- HuaweiCloud OBS: <https://github.com/setoru/distribution/tree/obs>
|
||||
- us3: <https://github.com/lambertxiao/distribution/tree/main>
|
||||
- Baidu BOS: <https://github.com/dolfly/distribution/tree/bos>
|
||||
- HDFS: <https://github.com/haosdent/distribution/tree/master>
|
||||
|
||||
### Writing new storage drivers
|
||||
|
||||
To create a valid storage driver, one must implement the
|
||||
`storagedriver.StorageDriver` interface and make sure to expose this driver
|
||||
via the factory system.
|
||||
|
||||
#### Registering
|
||||
|
||||
Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
|
||||
|
||||
## Testing
|
||||
|
||||
Storage driver test suites are provided in
|
||||
`storagedriver/testsuites/testsuites.go` and may be used for any storage
|
||||
driver written in Go. Tests can be registered using the `RegisterSuite`
|
||||
function, which runs the same set of tests for any registered driver.
|
41
docs/content/storage-drivers/azure.md
Normal file
41
docs/content/storage-drivers/azure.md
Normal file
|
@ -0,0 +1,41 @@
|
|||
---
|
||||
description: Explains how to use the Azure storage drivers
|
||||
keywords: registry, service, driver, images, storage, azure
|
||||
title: Microsoft Azure storage driver
|
||||
---
|
||||
|
||||
An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/) for object storage.
|
||||
|
||||
## Parameters
|
||||
|
||||
| Parameter | Required | Description |
|
||||
|:-----------------------------------|:---------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `accountname` | yes | Name of the Azure Storage Account. |
|
||||
| `accountkey` | yes | Primary or Secondary Key for the Storage Account. |
|
||||
| `container` | yes | Name of the Azure root storage container in which all registry data is stored. Must comply with the storage container name [requirements](https://docs.microsoft.com/rest/api/storageservices/fileservices/naming-and-referencing-containers--blobs--and-metadata). For example, if your URL is `https://myaccount.blob.core.windows.net/myblob`, use the container value of `myblob`.|
|
||||
| `realm` | no | Domain name suffix for the Storage Service API endpoint. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this is `core.windows.net`. |
|
||||
| `copy_status_poll_max_retry` | no | Max retry number for polling of copy operation status. Retries use a simple backoff algorithm where each retry number is multiplied by `copy_status_poll_delay`, and this number is used as the delay. Set to -1 to disable retries and abort if the copy does not complete immediately. Defaults to 5. |
|
||||
| `copy_status_poll_delay` | no | Time to wait between retries for polling of copy operation status. This time is multiplied by N on each retry, where N is the retry number. Defaults to 100ms |
|
||||
|
||||
|
||||
## Related information
|
||||
|
||||
* For information about Azure Blob Storage, see [the official docs](https://azure.microsoft.com/en-us/services/storage/).
|
||||
* You can use Azure [Blob Service REST API](https://docs.microsoft.com/en-us/rest/api/storageservices/Blob-Service-REST-API) to [create a storage container](https://docs.microsoft.com/en-us/rest/api/storageservices/Create-Container).
|
||||
|
||||
## Azure identity
|
||||
|
||||
In order to use managed identity to access Azure blob storage you can use [Microsoft Bicep](https://learn.microsoft.com/en-us/azure/templates/microsoft.app/managedenvironments/storages?pivots=deployment-language-bicep).
|
||||
|
||||
The following configures the credentials used by the Azure storage driver to construct an Azure Identity for accessing the blob storage:
|
||||
```
|
||||
properties: {
|
||||
azure: {
|
||||
accountname: accountname
|
||||
container: containername
|
||||
credentials: {
|
||||
type: default
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
19
docs/content/storage-drivers/filesystem.md
Normal file
19
docs/content/storage-drivers/filesystem.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
---
|
||||
description: Explains how to use the filesystem storage drivers
|
||||
keywords: registry, service, driver, images, storage, filesystem
|
||||
title: Filesystem storage driver
|
||||
---
|
||||
|
||||
An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.
|
||||
|
||||
## Parameters
|
||||
|
||||
* `rootdirectory`: (optional) The absolute path to a root directory tree in which
|
||||
to store all registry files. The registry stores all its data here so make sure
|
||||
there is adequate space available. Defaults to `/var/lib/registry`. If the directory
|
||||
does not exist, it will be created honoring [`umask`](https://man7.org/linux/man-pages/man2/umask.2.html)
|
||||
bits. If `umask` bits are not set, the resulting permission will be `0777`.
|
||||
* `maxthreads`: (optional) The maximum number of simultaneous blocking filesystem
|
||||
operations permitted within the registry. Each operation spawns a new thread and
|
||||
may cause thread exhaustion issues if many are done in parallel. Defaults to
|
||||
`100`, and cannot be lower than `25`.
|
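
For reference, a minimal `storage` section selecting this driver could look like this (the root directory path is illustrative):

```yaml
storage:
  filesystem:
    rootdirectory: /var/lib/registry
    maxthreads: 100
```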
24
docs/content/storage-drivers/gcs.md
Normal file
24
docs/content/storage-drivers/gcs.md
Normal file
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
description: Explains how to use the Google Cloud Storage drivers
|
||||
keywords: registry, service, driver, images, storage, gcs, google, cloud
|
||||
title: Google Cloud Storage driver
|
||||
---
|
||||
|
||||
An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage.
|
||||
|
||||
## Parameters
|
||||
|
||||
| Parameter | Required | Description |
|
||||
|:--------------|:---------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `bucket` | yes | The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization). |
|
||||
| `keyfile` | no | A private service account key file in JSON format used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts). |
|
||||
| `rootdirectory` | no | The root directory tree in which all registry files are stored. Defaults to the empty string (bucket root). If a prefix is used, the path `bucketname/<prefix>` has to be pre-created before starting the registry. The prefix is applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary.|
|
||||
| `chunksize` | no (default 5242880) | This is the chunk size used for uploading large blobs; it must be a multiple of 256*1024. |
|
||||
|
||||
{{< hint type=note >}}
|
||||
Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).
|
||||
|
||||
To use redirects with default credentials assigned to a virtual machine you have to enable "IAM Service Account Credentials API" and grant `iam.serviceAccounts.signBlob` permission on the used service account.
|
||||
|
||||
To use redirects with default credentials from Google Cloud CLI, in addition to the permissions mentioned above, you have to [impersonate the service account intended to be used by the registry](https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account).
|
||||
{{< /hint >}}
|
18
docs/content/storage-drivers/inmemory.md
Normal file
18
docs/content/storage-drivers/inmemory.md
Normal file
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
description: Explains how to use the in-memory storage drivers
|
||||
keywords: registry, service, driver, images, storage, in-memory
|
||||
title: In-memory storage driver (testing only)
|
||||
---
|
||||
|
||||
Purely for testing purposes, you can use the `inmemory` storage driver. This
|
||||
driver is an implementation of the `storagedriver.StorageDriver` interface which
|
||||
uses local memory for object storage. If you would like to run a registry from
|
||||
volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk.
|
||||
|
||||
{{< hint type=important >}}
|
||||
This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.
|
||||
{{< /hint >}}
|
||||
|
||||
## Parameters
|
||||
|
||||
None
|
15
docs/content/storage-drivers/middleware/_index.md
Normal file
15
docs/content/storage-drivers/middleware/_index.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
description: Explains how to use storage middleware
|
||||
keywords: registry, on-prem, images, tags, repository, distribution, storage drivers, advanced
|
||||
title: Storage middleware
|
||||
---
|
||||
|
||||
This document describes the registry storage middleware.
|
||||
|
||||
## Provided middleware
|
||||
|
||||
This storage driver package comes bundled with several middleware options:
|
||||
|
||||
- cloudfront
|
||||
- redirect
|
||||
- [rewrite](rewrite): Partially rewrites the URL returned by the storage driver.
|
32
docs/content/storage-drivers/middleware/rewrite.md
Normal file
32
docs/content/storage-drivers/middleware/rewrite.md
Normal file
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
description: Explains how to use the rewrite storage middleware
|
||||
keywords: registry, service, driver, images, storage, middleware, rewrite
|
||||
title: Rewrite middleware
|
||||
---
|
||||
|
||||
A storage middleware which allows rewriting the URL returned by the storage driver.
|
||||
|
||||
For example, it can be used to rewrite the Blob Storage URL returned by the Azure Blob Storage driver to use Azure CDN.
|
||||
|
||||
## Parameters
|
||||
|
||||
* `scheme` (optional): Rewrite the returned URL scheme (if set).
|
||||
* `host` (optional): Rewrite the returned URL host (if set).
|
||||
* `trimpathprefix` (optional): Trim the prefix from the returned URL path (if set).
|
||||
|
||||
## Example configuration
|
||||
|
||||
```yaml
|
||||
storage:
|
||||
azure:
|
||||
accountname: "ACCOUNT_NAME"
|
||||
accountkey: "******"
|
||||
container: container-name
|
||||
middleware:
|
||||
storage:
|
||||
- name: rewrite
|
||||
options:
|
||||
scheme: https
|
||||
host: example-cdn-endpoint.azurefd.net
|
||||
trimpathprefix: /container-name
|
||||
```
|
186
docs/content/storage-drivers/s3.md
Normal file
186
docs/content/storage-drivers/s3.md
Normal file
|
@ -0,0 +1,186 @@
|
|||
---
description: Explains how to use the S3 storage drivers
keywords: registry, service, driver, images, storage, S3
title: S3 storage driver
---

An implementation of the `storagedriver.StorageDriver` interface which uses
Amazon S3 or S3 compatible services for object storage.

## Parameters

| Parameter | Required | Description |
|:--------------|:---------|:------------|
| `accesskey` | no | Your AWS Access Key. If you use [IAM roles](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), omit to fetch temporary credentials from IAM. |
| `secretkey` | no | Your AWS Secret Key. If you use [IAM roles](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), omit to fetch temporary credentials from IAM. |
| `region` | yes | The AWS region in which your bucket exists. |
| `regionendpoint` | no | Endpoint for S3 compatible storage services (Minio, etc). |
| `forcepathstyle` | no | Enables path-style addressing when set to `true`. The default is `false`. |
| `bucket` | yes | The bucket name in which you want to store the registry's data. |
| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
| `keyid` | no | Optional KMS key ID to use for encryption (`encrypt` must be `true`, or this parameter is ignored). The default is `none`. |
| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `true`. |
| `skipverify` | no | Skips TLS verification when the value is set to `true`. The default is `false`. |
| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. The default is `true`. |
| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number that is larger than 5 * 1024 * 1024. |
| `multipartcopychunksize` | no | Default chunk size for all but the last S3 Multipart Upload part when copying stored objects. |
| `multipartcopymaxconcurrency` | no | Max number of concurrent S3 Multipart Upload operations when copying stored objects. |
| `multipartcopythresholdsize` | no | Default object size above which S3 Multipart Upload will be used when copying stored objects. |
| `rootdirectory` | no | A prefix applied to all S3 keys, allowing you to segment data in your bucket if necessary. |
| `storageclass` | no | The S3 storage class applied to each registry file. The default is `STANDARD`. |
| `useragent` | no | The `User-Agent` header value for S3 API operations. |
| `usedualstack` | no | Use AWS dual-stack API endpoints. |
| `accelerate` | no | Enable S3 Transfer Acceleration. |
| `objectacl` | no | The S3 Canned ACL for objects. The default value is `private`. |
| `loglevel` | no | The log level for the S3 client. The default value is `off`. |

> **Note** You can provide empty strings for your access and secret keys to run the driver
> on an EC2 instance; the driver then handles authentication with the instance's credentials. If you
> use [IAM roles](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html),
> omit these keys to fetch temporary credentials from IAM.

`region`: The name of the AWS region in which you would like to store objects (for example `us-east-1`). For a list of regions, see [Regions, Availability Zones, and Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html).

`regionendpoint`: (optional) Endpoint URL for S3 compatible APIs. This should not be provided when using Amazon S3.

`forcepathstyle`: (optional) Force path style for S3 compatible APIs. Some manufacturers only support path-style addressing, while others only support DNS-based bucket routing. Amazon S3 supports both. The value of this parameter applies regardless of the region settings.

`bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization.

`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to `false` if not specified).

`keyid`: (optional) The KMS key ID with which you would like your data encrypted (defaults to none if not specified; ignored if `encrypt` is not `true`).

`secure`: (optional) Whether you would like to transfer data to the bucket over SSL or not. Defaults to `true` (meaning transferring over SSL) if not specified. While setting this to `false` improves performance, it is not recommended due to security concerns.

`v4auth`: (optional) Whether you would like to use AWS Signature Version 4 with your requests. This defaults to `true` if not specified. The `eu-central-1` region does not work with Version 2 signatures, so the driver errors out if initialized with this region and `v4auth` set to `false`.

`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5 MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections benefit from larger chunk sizes.

`multipartcopychunksize`: (optional) The default chunk size for all but the last Upload Part in the S3 Multipart Upload operation when copying stored objects. Default value is set to `32 MB`.

`multipartcopymaxconcurrency`: (optional) The default maximum number of concurrent Upload Part operations in the S3 Multipart Upload when copying stored objects. Default value is set to `100`.

`multipartcopythresholdsize`: (optional) The default S3 object size above which multipart copy will be used when copying the object. Otherwise the object is copied with a single S3 API operation. Default value is set to `32 MB`.

`rootdirectory`: (optional) The root directory tree in which all registry files are stored. Defaults to the empty string (bucket root).

`storageclass`: (optional) The storage class applied to each registry file. Defaults to `STANDARD`. Valid options are `STANDARD` and `REDUCED_REDUNDANCY`.

`useragent`: (optional) The `User-Agent` header value for S3 API operations.

`usedualstack`: (optional) Use AWS dual-stack API endpoints, which support requests to S3 buckets over IPv6 and IPv4.

`accelerate`: (optional) Enable S3 Transfer Acceleration for faster transfers of files over long distances.

`objectacl`: (optional) The canned object ACL to be applied to each registry object. Defaults to `private`. If you are using a bucket owned by another AWS account, it is recommended that you set this to `bucket-owner-full-control` so that the bucket owner can access your objects. Other valid options are available in the [AWS S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl).

`loglevel`: (optional) Valid values are: `off` (default), `debug`, `debugwithsigning`, `debugwithhttpbody`, `debugwithrequestretries`, `debugwithrequesterrors` and `debugwitheventstreambody`. See the [AWS SDK for Go API reference](https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType) for details.

**NOTE:** Currently the S3 storage driver only supports S3 API compatible storage that
allows parts of a multipart upload to vary in size. [Cloudflare R2 is not supported.](https://developers.cloudflare.com/r2/objects/multipart-objects/#limitations)
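
Putting the common parameters together, a minimal `storage` section for this driver might look like the following sketch (the bucket name is a placeholder; `accesskey` and `secretkey` are omitted so credentials are fetched from IAM, and every other parameter falls back to the defaults listed above):

```yaml
# Minimal sketch: S3-backed storage relying on IAM credentials and defaults.
storage:
  s3:
    region: us-east-1
    bucket: my-registry-bucket  # placeholder bucket name
```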

## S3 permission scopes

The following AWS policy is required by the registry for push and pull. Make sure to replace `S3_BUCKET_NAME` with the name of your bucket.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:ListBucket",
        "s3:GetBucketLocation",
        "s3:ListBucketMultipartUploads"
      ],
      "Resource": "arn:aws:s3:::S3_BUCKET_NAME"
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:PutObject",
        "s3:GetObject",
        "s3:DeleteObject",
        "s3:ListMultipartUploadParts",
        "s3:AbortMultipartUpload"
      ],
      "Resource": "arn:aws:s3:::S3_BUCKET_NAME/*"
    }
  ]
}
```

See [the S3 policy documentation](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) for more details.

# CloudFront as Middleware with S3 backend

## Use Case

Adding CloudFront as a middleware for your S3 backed registry can dramatically
improve pull times. Your registry can retrieve your images
from edge servers, rather than the geographically limited location of your S3
bucket. The farther your registry is from your bucket, the more improvements are
possible. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/).

An alternative method for CloudFront that requires less configuration and uses
the same edge servers is [S3 Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
Please check the acceleration [Requirements](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html#transfer-acceleration-requirements)
to see whether you need CloudFront or S3 Transfer Acceleration.
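
If Transfer Acceleration is sufficient for your use case, note that it is enabled on the S3 driver itself via the `accelerate` parameter rather than as middleware. A sketch (the bucket name is a placeholder, and the bucket must have acceleration enabled on the AWS side):

```yaml
# Sketch: enable S3 Transfer Acceleration on the storage driver.
storage:
  s3:
    region: us-east-1
    bucket: my-registry-bucket  # placeholder; acceleration must be enabled on this bucket
    accelerate: true
```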

## Configuring CloudFront for Distribution

If you are unfamiliar with creating a CloudFront distribution, see [Getting
Started with
CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html).

Defaults can be kept in most areas except:

### Origin:

- The CloudFront distribution must be created such that the `Origin Path` is set
  to the directory level of the root "docker" key in S3. If your registry exists
  on the root of the bucket, this path should be left blank.

- For private S3 buckets, you must set `Restrict Bucket Access` to `Yes`. See
  the [CloudFront documentation](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html).

### Behaviors:

- Viewer Protocol Policy: HTTPS Only
- Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE
- Cached HTTP Methods: OPTIONS (checked)
- Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes
- Trusted Signers: Self (you can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts)

## Registry configuration

Here the `middleware` option is used. It is still important to keep the
`storage` option, because CloudFront only handles `pull` actions; `push` actions
are still written directly to S3.

The following example shows a minimum configuration:

```yaml
...
storage:
  s3:
    region: us-east-1
    bucket: docker.myregistry.com
middleware:
  storage:
    - name: cloudfront
      options:
        baseurl: https://abcdefghijklmn.cloudfront.net/
        privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem
        keypairid: ABCEDFGHIJKLMNOPQRST
...
```

## CloudFront Key-Pair

A CloudFront key-pair is required for all AWS accounts needing access to your
CloudFront distribution. You must have access to your AWS account's root credentials to create the required CloudFront key-pair. For information, see [Creating CloudFront Key
Pairs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs).
6
docs/data/menu/extra.yaml
Normal file
@@ -0,0 +1,6 @@
---
header:
  - name: GitHub
    ref: https://github.com/distribution/distribution/
    icon: gdoc_github
    external: true
56
docs/dockerhub.md
Normal file
@@ -0,0 +1,56 @@
# Distribution

This repository provides container images for the Open Source Registry implementation for storing and distributing container artifacts in conformance with the
[OCI Distribution Specification](https://github.com/opencontainers/distribution-spec).

<img src="https://raw.githubusercontent.com/distribution/distribution/main/distribution-logo.svg" width="200px" />

[](https://github.com/distribution/distribution/actions/workflows/build.yml?query=workflow%3Abuild)
[](https://github.com/distribution/distribution/actions?query=workflow%3Aconformance)
[](LICENSE)

## Quick start

Run the registry locally with the [default configuration](https://github.com/distribution/distribution/blob/main/cmd/registry/config-dev.yml):
```
docker run -d -p 5000:5000 --restart always --name registry distribution/distribution:edge
```

*NOTE:* in order to push/pull against the locally running registry, you must allow
your Docker (containerd) engine to use an _insecure_ registry by editing `/etc/docker/daemon.json` and subsequently restarting it:
```
{
  "insecure-registries": ["host.docker.internal:5000"]
}
```

Now you are ready to use it:
```
docker pull alpine
docker tag alpine localhost:5000/alpine
docker push localhost:5000/alpine
```

⚠️ Beware the default configuration uses the [`filesystem` storage driver](https://github.com/distribution/distribution/blob/main/docs/content/storage-drivers/filesystem.md)
and the above example command does not mount a local filesystem volume into the running container.
If you wish to mount the local filesystem to the `rootdirectory` of the
`filesystem` storage driver, run the following command:
```
docker run -d -p 5000:5000 -v $PWD/FS/PATH:/var/lib/registry --restart always --name registry distribution/distribution:edge
```

### Custom configuration

If you don't want to use the default configuration file, you can supply
your own custom configuration file as follows:
```
docker run -d -p 5000:5000 -v $PWD/PATH/TO/config.yml:/etc/distribution/config.yml --restart always --name registry distribution/distribution:edge
```

## Communication

For async communication and long-running discussions please use issues and pull requests
on the [GitHub repo](https://github.com/distribution/distribution).

For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
that everyone is welcome to join and chat about development.
28
docs/hugo.yaml
Normal file
@@ -0,0 +1,28 @@
baseURL: https://distribution.github.io/distribution
languageCode: en-us
title: CNCF Distribution
theme: hugo-geekdoc

markup:
  goldmark:
    renderer:
      unsafe: true

pluralizeListTitles: false
enableRobotsTXT: true
taxonomies: [tags]
minify:
  disableHTML: true

# Geekdoc required configuration
pygmentsUseClasses: true
pygmentsCodeFences: true
disablePathToLower: true

params:
  geekdocRepo: "https://github.com/distribution/distribution"
  geekdocEditPath: edit/main/docs
  geekdocLegalNotice: "https://www.linuxfoundation.org/legal/trademark-usage"
  geekdocContentLicense:
    name: CC BY 4.0
    link: https://creativecommons.org/licenses/by/4.0/
Some files were not shown because too many files have changed in this diff