forked from TrueCloudLab/distribution

Compare commits: main ... tcl/master (5 commits)

Commits (SHA1):
  7488fe2d43
  ca2da5e23d
  953388ac54
  5284b39b5f
  39e42437aa

2946 changed files with 4745 additions and 990061 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 48 changes)

@@ -1,48 +0,0 @@ (entire file removed)
name: Bug report
description: Create a report to help us improve
labels:
  - kind/bug
body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to report a bug!
        If this is a security issue please report it to the [Distributions Security Mailing List](mailto:cncf-distribution-security@lists.cncf.io).
  - type: textarea
    id: description
    attributes:
      label: Description
      description: Please give a clear and concise description of the bug
    validations:
      required: true
  - type: textarea
    id: repro
    attributes:
      label: Reproduce
      description: Steps to reproduce the bug
      placeholder: |
        1. start registry version X ...
        2. `docker push image:tag` ...
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: Expected behavior
      description: What is the expected behavior?
      placeholder: |
        E.g. "registry returns an incorrect API error"
  - type: textarea
    id: version
    attributes:
      label: registry version
      description: Output of `registry --version`. Alternatively tell us the docker image tag.
    validations:
      required: true
  - type: textarea
    id: additional
    attributes:
      label: Additional Info
      description: Additional info you want to provide such as logs, system info, environment, etc.
    validations:
      required: false
.github/ISSUE_TEMPLATE/config.yml (vendored, 8 changes)

@@ -1,8 +0,0 @@ (entire file removed)
blank_issues_enabled: false
contact_links:
  - name: Security and Vulnerabilities
    url: https://github.com/distribution/distribution/blob/main/SECURITY.md
    about: Please report any security issues or vulnerabilities responsibly to the distribution maintainers team. Please do not use the public issue tracker.
  - name: Questions and Discussions
    url: https://github.com/distribution/distribution/discussions/new/choose
    about: Use Github Discussions to ask questions and/or open discussion topics.
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 12 changes)

@@ -1,12 +0,0 @@ (entire file removed)
name: Feature request
description: Missing functionality? Come tell us about it!
labels:
  - kind/feature
body:
  - type: textarea
    id: description
    attributes:
      label: Description
      description: What is the feature you want to see?
    validations:
      required: true
.github/dependabot.yml (vendored, 8 changes)

@@ -1,8 +0,0 @@ (entire file removed)
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
    labels:
      - "dependencies"
.github/labeler.yml (vendored, 61 changes)

@@ -1,61 +0,0 @@ (entire file removed)
area/api:
  - changed-files:
      - any-glob-to-any-file:
          - registry/api/**
          - registry/handlers/**
area/auth:
  - changed-files:
      - any-glob-to-any-file:
          - registry/auth/**
area/build:
  - changed-files:
      - any-glob-to-any-file:
          - Makefile
          - Dockerfile
          - docker-bake.hcl
          - dockerfiles/**
area/cache:
  - changed-files:
      - any-glob-to-any-file:
          - registry/storage/cache/**
area/ci:
  - changed-files:
      - any-glob-to-any-file:
          - .github/**
          - tests/**
          - testutil/**
area/config:
  - changed-files:
      - any-glob-to-any-file:
          - configuration/**
area/docs:
  - changed-files:
      - any-glob-to-any-file:
          - README.md
          - docs/**/*.md
area/proxy:
  - changed-files:
      - any-glob-to-any-file:
          - registry/proxy/**
area/storage:
  - changed-files:
      - any-glob-to-any-file:
          - registry/storage/**
area/storage/azure:
  - changed-files:
      - any-glob-to-any-file:
          - registry/storage/driver/azure/**
area/storage/gcs:
  - changed-files:
      - any-glob-to-any-file:
          - registry/storage/driver/gcs/**
area/storage/s3:
  - changed-files:
      - any-glob-to-any-file:
          - registry/storage/driver/s3-aws/**
dependencies:
  - changed-files:
      - any-glob-to-any-file:
          - vendor/**
          - go.mod
          - go.sum
.github/workflows/build.yml (vendored, 45 changes)

@@ -27,18 +27,18 @@ jobs:
  fail-fast: false
  matrix:
  go:
- - 1.21.12
+ - 1.20.12
- - 1.22.5
+ - 1.21.5
  target:
  - test-coverage
  - test-cloud-storage
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  -
  name: Set up Go
- uses: actions/setup-go@v5
+ uses: actions/setup-go@v3
  with:
  go-version: ${{ matrix.go }}
  -
@@ -47,7 +47,7 @@ jobs:
  make ${{ matrix.target }}
  -
  name: Codecov
- uses: codecov/codecov-action@v4
+ uses: codecov/codecov-action@v3
  with:
  directory: ./
@@ -62,13 +62,13 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  -
  name: Docker meta
  id: meta
- uses: docker/metadata-action@v5
+ uses: docker/metadata-action@v4
  with:
  images: |
  ${{ env.DOCKERHUB_SLUG }}
@@ -94,53 +94,43 @@ jobs:
  org.opencontainers.image.description=The toolkit to pack, ship, store, and distribute container content
  -
  name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v2
  -
  name: Login to DockerHub
  if: github.event_name != 'pull_request'
- uses: docker/login-action@v3
+ uses: docker/login-action@v2
  with:
  username: ${{ secrets.DOCKERHUB_USERNAME }}
  password: ${{ secrets.DOCKERHUB_TOKEN }}
  -
  name: Log in to GitHub Container registry
  if: github.event_name != 'pull_request'
- uses: docker/login-action@v3
+ uses: docker/login-action@v2
  with:
  registry: ghcr.io
  username: ${{ github.actor }}
  password: ${{ secrets.GITHUB_TOKEN }}
  -
  name: Build artifacts
- uses: docker/bake-action@v5
+ uses: docker/bake-action@v2
  with:
  targets: artifact-all
  -
- name: Rename provenance
+ name: Move artifacts
- run: |
-   for pdir in ./bin/*/; do
-     (
-       cd "$pdir"
-       binname=$(find . -name '*.tar.gz')
-       filename=$(basename "${binname%.tar.gz}")
-       mv "provenance.json" "${filename}.provenance.json"
-     )
-   done
- -
- name: Move and list artifacts
  run: |
  mv ./bin/**/* ./bin/
- tree -nh ./bin
  -
  name: Upload artifacts
- uses: actions/upload-artifact@v4.3.0
+ uses: actions/upload-artifact@v3
  with:
  name: registry
  path: ./bin/*
  if-no-files-found: error
  -
  name: Build image
- uses: docker/bake-action@v5
+ uses: docker/bake-action@v2
  with:
  files: |
  ./docker-bake.hcl
@@ -149,13 +139,12 @@ jobs:
  push: ${{ github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') }}
  -
  name: GitHub Release
- uses: softprops/action-gh-release@v2
+ uses: softprops/action-gh-release@v1
  if: startsWith(github.ref, 'refs/tags/')
  with:
  draft: true
  files: |
  bin/*.tar.gz
- bin/*.provenance.json
  bin/*.sha256
  env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/codeql-analysis.yml (vendored, 8 changes)

@@ -34,7 +34,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 2
  -
@@ -44,12 +44,12 @@ jobs:
  git checkout HEAD^2
  -
  name: Initialize CodeQL
- uses: github/codeql-action/init@v3.22.12
+ uses: github/codeql-action/init@v2
  with:
  languages: ${{ matrix.language }}
  -
  name: Autobuild
- uses: github/codeql-action/autobuild@v3.22.12
+ uses: github/codeql-action/autobuild@v2
  -
  name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3.22.12
+ uses: github/codeql-action/analyze@v2
.github/workflows/conformance.yml (vendored, 6 changes)

@@ -17,12 +17,12 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  -
  name: Build image
- uses: docker/bake-action@v5
+ uses: docker/bake-action@v2
  with:
  targets: image-local
  -
@@ -49,7 +49,7 @@ jobs:
  run: mkdir -p .out/ && mv {report.html,junit.xml} .out/
  -
  name: Upload test results
- uses: actions/upload-artifact@v4.3.0
+ uses: actions/upload-artifact@v3
  with:
  name: oci-test-results-${{ github.sha }}
  path: .out/
.github/workflows/dockerhub-readme.yml (vendored, 2 changes)

@@ -27,7 +27,7 @@ jobs:
  uses: actions/checkout@v4
  -
  name: Update Docker Hub README
- uses: peter-evans/dockerhub-description@v4
+ uses: peter-evans/dockerhub-description@v3
  with:
  username: ${{ secrets.DOCKERHUB_USERNAME }}
  password: ${{ secrets.DOCKERHUB_TOKEN }}
.github/workflows/docs.yml (vendored, 13 changes)

@@ -26,26 +26,27 @@ jobs:
  uses: actions/checkout@v4
  - name: Setup Pages
  id: pages
- uses: actions/configure-pages@v4
+ uses: actions/configure-pages@v3
  - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ uses: docker/setup-buildx-action@v2
  - name: Build docs
- uses: docker/bake-action@v5
+ uses: docker/bake-action@v3
  with:
  files: |
  docker-bake.hcl
  targets: docs-export
- provenance: false
  set: |
  *.cache-from=type=gha,scope=docs
  *.cache-to=type=gha,scope=docs,mode=max
+ env:
+ DOCS_BASEURL: ${{ steps.pages.outputs.base_path }}
  - name: Fix permissions
  run: |
  chmod -c -R +rX "./build/docs" | while read line; do
  echo "::warning title=Invalid file permissions automatically fixed::$line"
  done
  - name: Upload Pages artifact
- uses: actions/upload-pages-artifact@v3
+ uses: actions/upload-pages-artifact@v2
  with:
  path: ./build/docs
@@ -69,4 +70,4 @@ jobs:
  steps:
  - name: Deploy to GitHub Pages
  id: deployment
- uses: actions/deploy-pages@v4 # or the latest "vX.X.X" version tag for this action
+ uses: actions/deploy-pages@v2 # or the latest "vX.X.X" version tag for this action
.github/workflows/e2e.yml (vendored, 6 changes)

@@ -20,12 +20,12 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  -
  name: Build image
- uses: docker/bake-action@v5
+ uses: docker/bake-action@v2
  with:
  targets: image-local
  -
@@ -42,7 +42,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  with:
  fetch-depth: 0
  -
.github/workflows/fossa.yml (vendored, 4 changes)

@@ -17,9 +17,9 @@ jobs:
  steps:
  - name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v3

  - name: Run FOSSA scan and upload build data
- uses: fossa-contrib/fossa-action@v3
+ uses: fossa-contrib/fossa-action@v2
  with:
  fossa-api-key: cac3dc8d4f2ba86142f6c0f2199a160f
.github/workflows/label.yaml (vendored, 19 changes)

@@ -1,19 +0,0 @@ (entire file removed)
name: Pull Request Labeler

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  pull_request_target:

jobs:
  labeler:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/labeler@v5
        with:
          dot: true
.github/workflows/scorecards.yml (vendored, 8 changes)

@@ -22,12 +22,12 @@ jobs:
  steps:
  - name: "Checkout code"
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
+ uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # tag=v3.0.0
  with:
  persist-credentials: false

  - name: "Run analysis"
- uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
+ uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # tag=v2.0.6
  with:
  results_file: results.sarif
  results_format: sarif
@@ -46,7 +46,7 @@ jobs:
  # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
  # format to the repository Actions tab.
  - name: "Upload artifact"
- uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # tag=v4.3.0
+ uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 # tag=v3.0.0
  with:
  name: SARIF file
  path: results.sarif
@@ -54,7 +54,7 @@ jobs:
  # Upload the results to GitHub's code scanning dashboard.
  - name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@1500a131381b66de0c52ac28abb13cd79f4b7ecc # tag=v2.22.12
+ uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # tag=v1.0.26
  with:
  sarif_file: results.sarif
.github/workflows/validate.yml (vendored, 2 changes)

@@ -29,7 +29,7 @@ jobs:
  steps:
  -
  name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v3
  -
  name: Run
  run: |
@@ -6,7 +6,7 @@ linters:
  - goimports
  - revive
  - ineffassign
- - govet
+ - vet
  - unused
  - misspell
  - bodyclose
@@ -22,7 +22,7 @@ linters-settings:
  - name: unused-parameter
  disabled: true

- issues:
+ run:
  deadline: 2m
- exclude-dirs:
+ skip-dirs:
  - vendor
.mailmap (224 changes)

@@ -1,194 +1,32 @@
- Aaron Lehmann <alehmann@netflix.com>
- Aaron Lehmann <alehmann@netflix.com> <aaron.lehmann@docker.com>
- Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
- Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
- Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
- Alexander Morozov <lk4d4math@gmail.com>
- Alexander Morozov <lk4d4math@gmail.com> <lk4d4@docker.com>
- Anders Ingemann <aim@orbit.online>
- Andrew Meredith <andymeredith@gmail.com>
- Andrew Meredith <andymeredith@gmail.com> <kendru@users.noreply.github.com>
- Andrey Smirnov <andrey.smirnov@siderolabs.com>
- Andrii Soldatenko <andrii.soldatenko@gmail.com>
- Andrii Soldatenko <andrii.soldatenko@gmail.com> <andrii.soldatenko@dynatrace.com>
- Anthony Ramahay <thewolt@gmail.com>
- Antonio Murdaca <antonio.murdaca@gmail.com>
- Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
- Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
- Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
- Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
- Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
- Austin Vazquez <macedonv@amazon.com>
- Benjamin Schanzel <benjamin.schanzel@bmw.de>
- Brian Bland <brian.t.bland@gmail.com>
- Brian Bland <brian.t.bland@gmail.com> <brian.bland@docker.com>
- Brian Bland <brian.t.bland@gmail.com> <r4nd0m1n4t0r@gmail.com>
- Chad Faragher <wyckster@hotmail.com>
- Cory Snider <csnider@mirantis.com>
- CrazyMax <github@crazymax.dev>
- CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
- CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
- Cristian Staretu <cristian.staretu@gmail.com>
- Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
- Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
- Daniel Nephin <dnephin@gmail.com>
- Daniel Nephin <dnephin@gmail.com> <dnephin@docker.com>
- David Karlsson <david.karlsson@docker.com>
- David Karlsson <david.karlsson@docker.com> <35727626+dvdksn@users.noreply.github.com>
- David Wu <dwu7401@gmail.com>
- David Wu <dwu7401@gmail.com> <david.wu@docker.com>
- Derek McGowan <derek@mcg.dev>
- Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
- Dimitar Kostadinov <dimitar.kostadinov@sap.com>
- Doug Davis <dug@us.ibm.com>
- Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
- Emmanuel Ferdman <emmanuelferdman@gmail.com>
- Eng Zer Jun <engzerjun@gmail.com>
- Eric Yang <windfarer@gmail.com>
- Eric Yang <windfarer@gmail.com> <Windfarer@users.noreply.github.com>
- Eric Yang <windfarer@gmail.com> <qizhao.yang@daocloud.io>
- Erica Windisch <erica@windisch.us>
- Erica Windisch <erica@windisch.us> <eric@windisch.us>
- Guillaume J. Charmes <charmes.guillaume@gmail.com>
- Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume.charmes@dotcloud.com>
- Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume@charmes.net>
- Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume@docker.com>
- Guillaume J. Charmes <charmes.guillaume@gmail.com> <guillaume@dotcloud.com>
- Hayley Swimelar <hswimelar@gmail.com>
- Ismail Alidzhikov <i.alidjikov@gmail.com>
- Jaime Martinez <jmartinez@gitlab.com>
- James Hewitt <james.hewitt@uk.ibm.com>
- Jessica Frazelle <jess@oxide.computer>
- Jessica Frazelle <jess@oxide.computer> <acidburn@docker.com>
- Jessica Frazelle <jess@oxide.computer> <acidburn@google.com>
- Jessica Frazelle <jess@oxide.computer> <acidburn@microsoft.com>
- Jessica Frazelle <jess@oxide.computer> <jess@docker.com>
- Jessica Frazelle <jess@oxide.computer> <jess@mesosphere.com>
- Jessica Frazelle <jess@oxide.computer> <jessfraz@google.com>
- Jessica Frazelle <jess@oxide.computer> <jfrazelle@users.noreply.github.com>
- Jessica Frazelle <jess@oxide.computer> <me@jessfraz.com>
- Jessica Frazelle <jess@oxide.computer> <princess@docker.com>
- Joao Fernandes <joaofnfernandes@gmail.com>
- Joao Fernandes <joaofnfernandes@gmail.com> <joao.fernandes@docker.com>
- João Pereira <484633+joaodrp@users.noreply.github.com>
- Joffrey F <joffrey@docker.com>
- Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
- Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
- Johan Euphrosine <proppy@google.com>
- Johan Euphrosine <proppy@google.com> <proppy@aminche.com>
- John Howard <github@lowenna.com>
- John Howard <github@lowenna.com> <jhoward@microsoft.com>
- Josh Hawn <jlhawn@berkeley.edu>
- Josh Hawn <jlhawn@berkeley.edu> <josh.hawn@docker.com>
- Joyce Brum <joycebrumu.u@gmail.com>
- Joyce Brum <joycebrumu.u@gmail.com> <joycebrum@google.com>
- Justin Cormack <justin.cormack@docker.com>
- Justin Cormack <justin.cormack@docker.com> <justin.cormack@unikernel.com>
- Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
- Kirat Singh <kirat.singh@gmail.com>
- Kirat Singh <kirat.singh@gmail.com> <kirat.singh@beacon.io>
- Kirat Singh <kirat.singh@gmail.com> <kirat.singh@wsq.io>
- Kyle Squizzato <ksquizz@gmail.com>
- Liang Zheng <zhengliang0901@gmail.com>
- Luca Bruno <lucab@debian.org>
- Luca Bruno <lucab@debian.org> <luca.bruno@coreos.com>
- Mahmoud Kandil <47168819+MahmoudKKandil@users.noreply.github.com>
- Manish Tomar <manish.tomar@docker.com>
- Manish Tomar <manish.tomar@docker.com> <manishtomar@users.noreply.github.com>
- Maria Bermudez <bermudez.mt@gmail.com>
- Maria Bermudez <bermudez.mt@gmail.com> <bermudezmt@users.noreply.github.com>
- Markus Thömmes <markusthoemmes@me.com>
- Matt Linville <matt@linville.me>
- Matt Linville <matt@linville.me> <misty@apache.org>
- Matt Linville <matt@linville.me> <misty@docker.com>
- Michael Crosby <crosbymichael@gmail.com>
- Michael Crosby <crosbymichael@gmail.com> <crosby.michael@gmail.com>
- Michael Crosby <crosbymichael@gmail.com> <michael@crosbymichael.com>
- Michael Crosby <crosbymichael@gmail.com> <michael@docker.com>
- Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
- Michal Minar <miminar@redhat.com>
- Michal Minar <miminar@redhat.com> Michal Minář <miminar@redhat.com>
- Mike Brown <brownwm@us.ibm.com>
- Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
- Mikel Rychliski <mikel@mikelr.com>
- Milos Gajdos <milosthegajdos@gmail.com>
- Milos Gajdos <milosthegajdos@gmail.com> <1392526+milosgajdos@users.noreply.github.com>
- Milos Gajdos <milosthegajdos@gmail.com> <milosgajdos83@gmail.com>
- Nikita Tarasov <nikita@mygento.ru>
- Nikita Tarasov <nikita@mygento.ru> <luckyraul@users.noreply.github.com>
- Oleg Bulatov <oleg@bulatov.me>
- Oleg Bulatov <oleg@bulatov.me> <obulatov@redhat.com>
- Olivier Gambier <olivier@docker.com>
- Olivier Gambier <olivier@docker.com> <dmp42@users.noreply.github.com>
- Omer Cohen <git@omer.io>
- Omer Cohen <git@omer.io> <git@omerc.net>
- Paul Meyer <49727155+katexochen@users.noreply.github.com>
- Per Lundberg <perlun@gmail.com>
- Per Lundberg <perlun@gmail.com> <per.lundberg@ecraft.com>
- Peter Dave Hello <hsu@peterdavehello.org>
- Peter Dave Hello <hsu@peterdavehello.org> <PeterDaveHello@users.noreply.github.com>
- Phil Estes <estesp@gmail.com>
- Phil Estes <estesp@gmail.com> <estesp@amazon.com>
- Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
- Richard Scothern <richard.scothern@gmail.com>
- Richard Scothern <richard.scothern@gmail.com> <richard.scothern@docker.com>
- Rober Morales-Chaparro <rober.morales@rstor.io>
- Rober Morales-Chaparro <rober.morales@rstor.io> <rober@rstor.io>
- Robin Ketelbuters <robin.ketelbuters@gmail.com>
- Sebastiaan van Stijn <github@gone.nl>
- Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
- Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
- Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
- Sharif Nassar <sharif@mrwacky.com>
- Sharif Nassar <sharif@mrwacky.com> <mrwacky42@users.noreply.github.com>
- Solomon Hykes <solomon@dagger.io>
- Solomon Hykes <solomon@dagger.io> <s@docker.com>
- Solomon Hykes <solomon@dagger.io> <solomon.hykes@dotcloud.com>
- Solomon Hykes <solomon@dagger.io> <solomon@docker.com>
- Solomon Hykes <solomon@dagger.io> <solomon@dotcloud.com>
- Stephen Day <stevvooe@gmail.com>
- Stephen Day <stevvooe@gmail.com> <stephen.day@docker.com>
- Stephen Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
- Steven Kalt <SKalt@users.noreply.github.com>
- Sven Dowideit <SvenDowideit@home.org.au>
- Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
- Sylvain DESGRAIS <sylvain.desgrais@gmail.com>
- Tadeusz Dudkiewicz <tadeusz.dudkiewicz@rtbhouse.com>
- Tibor Vass <teabee89@gmail.com>
- Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
- Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
- Victor Vieux <victorvieux@gmail.com>
- Victor Vieux <victorvieux@gmail.com> <dev@vvieux.com>
- Victor Vieux <victorvieux@gmail.com> <victor.vieux@docker.com>
- Victor Vieux <victorvieux@gmail.com> <victor.vieux@dotcloud.com>
- Victor Vieux <victorvieux@gmail.com> <victor@docker.com>
- Victor Vieux <victorvieux@gmail.com> <victor@dotcloud.com>
- Victor Vieux <victorvieux@gmail.com> <victorvieux@gmail.com>
- Victor Vieux <victorvieux@gmail.com> <vieux@docker.com>
- Victoria Bialas <victoria.bialas@docker.com>
- Victoria Bialas <victoria.bialas@docker.com> <londoncalling@users.noreply.github.com>
- Vincent Batts <vbatts@redhat.com>
- Vincent Batts <vbatts@redhat.com> <vbatts@hashbangbash.com>
- Vincent Demeester <vincent.demeester@docker.com>
- Vincent Demeester <vincent.demeester@docker.com> <vincent+github@demeester.fr>
- Vincent Demeester <vincent.demeester@docker.com> <vincent@demeester.fr>
- Vincent Demeester <vincent.demeester@docker.com> <vincent@sbr.pm>
- Vincent Giersch <vincent@giersch.fr>
- Vincent Giersch <vincent@giersch.fr> <vincent.giersch@ovh.net>
- Wang Yan <wangyan@vmware.com>
- Wen-Quan Li <legendarilylwq@gmail.com>
- Wen-Quan Li <legendarilylwq@gmail.com> <wenquan.li@hp.com>
- Wen-Quan Li <legendarilylwq@gmail.com> <wenquan.li@hpe.com>
- Yu Wang <yuwa@microsoft.com>
+ Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
+ Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
+ Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
+ Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
+ Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
+ Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
+ Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
+ Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
+ Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
+ harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
+ Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
+ Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
+ Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
+ Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
+ davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
+ Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
+ Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
+ Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
+ Yu Wang <yuwa@microsoft.com> yuwaMSFT2 <yuwa@microsoft.com>
  Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com>
- baojiangnan <baojiangnan@meituan.com>
- baojiangnan <baojiangnan@meituan.com> <baojn1998@163.com>
- erezrokah <erezrokah@users.noreply.github.com>
- goodactive <goodactive@qq.com>
- gotgelf <gotgelf@gmail.com>
- guoguangwu <guoguangwug@gmail.com>
- harche <p.harshal@gmail.com>
- harche <p.harshal@gmail.com> <harche@users.noreply.github.com>
- icefed <zlwangel@gmail.com>
- oliver-goetz <o.goetz@sap.com>
- xiaoxiangxianzi <zhaoyizheng@outlook.com>
+ Olivier Gambier <olivier@docker.com> dmp <dmp@loaner.local>
+ Olivier Gambier <olivier@docker.com> Olivier <o+github@gambier.email>
+ Olivier Gambier <olivier@docker.com> Olivier <dmp42@users.noreply.github.com>
+ Elsan Li 李楠 <elsanli@tencent.com> elsanli(李楠) <elsanli@tencent.com>
+ Rui Cao <ruicao@alauda.io> ruicao <ruicao@alauda.io>
+ Gwendolynne Barr <gwendolynne.barr@docker.com> gbarr01 <gwendolynne.barr@docker.com>
+ Haibing Zhou 周海兵 <zhouhaibing089@gmail.com> zhouhaibing089 <zhouhaibing089@gmail.com>
+ Feng Honglin <tifayuki@gmail.com> tifayuki <tifayuki@gmail.com>
+ Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn>
+ Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com>
+ Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com>
+ Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>
AUTHORS (530 changes)

@@ -1,530 +0,0 @@ (entire file removed)
# This file lists all individuals having contributed content to the repository.
# For how it is generated, see dockerfiles/authors.Dockerfile.

a-palchikov <deemok@gmail.com>
Aaron Lehmann <alehmann@netflix.com>
Aaron Schlesinger <aschlesinger@deis.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Dobrawy <ad-m@users.noreply.github.com>
Adam Duke <adam.v.duke@gmail.com>
Adam Enger <adamenger@gmail.com>
Adam Kaplan <adam.kaplan@redhat.com>
Adam Wolfe Gordon <awg@digitalocean.com>
AdamKorcz <adam@adalogics.com>
Adrian Mouat <adrian.mouat@gmail.com>
Adrian Plata <adrian.plata@docker.com>
Adrien Duermael <adrien@duermael.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Aidan Hobson Sayers <aidanhs@cantab.net>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Aleksejs Sinicins <monder@monder.cc>
Alex <aleksandrosansan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
Alex Laties <agl@tumblr.com>
Alexander Larsson <alexl@redhat.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexey Gladkov <gladkov.alexey@gmail.com>
Alfonso Acosta <fons@syntacticsugar.consulting>
allencloud <allen.sun@daocloud.io>
Alvin Feng <alvin4feng@yahoo.com>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andreas Hassing <andreas@famhassing.dk>
Andrew Bulford <andrew.bulford@redmatter.com>
Andrew Hsu <andrewhsu@acm.org>
Andrew Lavery <laverya@umich.edu>
Andrew Leung <anwleung@gmail.com>
Andrew Lively <andrew.lively2@gmail.com>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrews Medina <andrewsmedina@gmail.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andrii Soldatenko <andrii.soldatenko@gmail.com>
Andy Goldstein <agoldste@redhat.com>
andyzhangx <xiazhang@microsoft.com>
Anian Z <ziegler@sicony.de>
Anil Belur <askb23@gmail.com>
Anis Elleuch <vadmeste@gmail.com>
Ankush Agarwal <ankushagarwal11@gmail.com>
Anne Henmi <41210220+ahh-docker@users.noreply.github.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <antonio.murdaca@gmail.com>
Antonio Ojea <antonio.ojea.garcia@gmail.com>
Anusha Ragunathan <anusha@docker.com>
Arien Holthuizen <aholthuizen@schubergphilis.com>
Arko Dasgupta <arkodg@users.noreply.github.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Arthur Gautier <baloo@gandi.net>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Aviral Takkar <aviral26@users.noreply.github.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
baojiangnan <baojiangnan@meituan.com>
Ben Bodenmiller <bbodenmiller@hotmail.com>
Ben De St Paer-Gotch <bende@outlook.com>
Ben Emamian <ben@ictace.com>
Ben Firshman <ben@firshman.co.uk>
Ben Kochie <superq@gmail.com>
Ben Manuel <ben.manuel@procore.com>
Bhavin Gandhi <bhavin192@users.noreply.github.com>
Bill <NonCreature0714@users.noreply.github.com>
bin liu <liubin0329@gmail.com>
Bouke van der Bijl <me@bou.ke>
Bracken Dawson <abdawson@gmail.com>
Brandon Mitchell <git@bmitch.net>
Brandon Philips <brandon@ifup.co>
Brett Higgins <brhiggins@arbor.net>
Brian Bland <brian.t.bland@gmail.com>
Brian Goff <cpuguy83@gmail.com>
burnettk <burnettk@gmail.com>
Caleb Spare <cespare@gmail.com>
Carson A <ca@carsonoid.net>
Cezar Sa Espinola <cezarsa@gmail.com>
Chad Faragher <wyckster@hotmail.com>
Chaos John <chaosjohn.yjh@icloud.com>
Charles Smith <charles.smith@docker.com>
Cheng Zheng <chengzheng.apply@gmail.com>
chlins <chenyuzh@vmware.com>
Chris Aniszczyk <caniszczyk@gmail.com>
Chris Dillon <squarism@gmail.com>
Chris K. Wong <chriskw.xyz@gmail.com>
Chris Patterson <chrispat@github.com>
Christopher Yeleighton <ne01026@shark.2a.pl>
Christy Perez <christy@linux.vnet.ibm.com>
Chuanying Du <cydu@google.com>
Clayton Coleman <ccoleman@redhat.com>
Collin Shoop <cshoop@digitalocean.com>
Corey Quon <corey.quon@gmail.com>
Cory Snider <csnider@mirantis.com>
CrazyMax <github@crazymax.dev>
cressie176 <github@stephen-cresswell.net>
Cristian Staretu <cristian.staretu@gmail.com>
cui fliter <imcusg@gmail.com>
cuiwei13 <cuiwei13@pku.edu.cn>
cyli <cyli@twistedmatrix.com>
Daehyeok Mun <daehyeok@gmail.com>
Daisuke Fujita <dtanshi45@gmail.com>
Damien Mathieu <dmathieu@salesforce.com>
Dan Fredell <furtchet@gmail.com>
Dan Walsh <dwalsh@redhat.com>
Daniel Helfand <helfand.4@gmail.com>
Daniel Huhn <daniel@danielhuhn.de>
Daniel Menet <membership@sontags.ch>
Daniel Mizyrycki <mzdaniel@glidelink.net>
Daniel Nephin <dnephin@gmail.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Danila Fominykh <dancheg97@fmnx.su>
Darren Shepherd <darren@rancher.com>
Dave <david.warshaw@gmail.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Calavera <david.calavera@gmail.com>
David Justice <david@devigned.com>
David Karlsson <david.karlsson@docker.com>
David Lawrence <david.lawrence@docker.com>
David Luu <david@davidluu.info>
David Mackey <tdmackey@booleanhaiku.com>
David van der Spek <vanderspek.david@gmail.com>
David Verhasselt <david@crowdway.com>
David Wu <dwu7401@gmail.com>
David Xia <dxia@spotify.com>
Dawn W Docker <dawn.wood@users.noreply.github.com>
ddelange <14880945+ddelange@users.noreply.github.com>
Dejan Golja <dejan@golja.org>
Denis Andrejew <da.colonel@gmail.com>
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Derek <crq@kernel.org>
Derek McGowan <derek@mcg.dev>
Deshi Xiao <xiaods@gmail.com>
Dimitar Kostadinov <dimitar.kostadinov@sap.com>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Djibril Koné <kone.djibril@gmail.com>
dmp <dmp@loaner.local>
Don Bowman <don@agilicus.com>
Don Kjer <don.kjer@gmail.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
drornir <drornir@users.noreply.github.com>
duanhongyi <duanhongyi@doopai.com>
ducksecops <daniel@ducksecops.uk>
E. M. Bray <erik.m.bray@gmail.com>
Edgar Lee <edgar.lee@docker.com>
Elliot Pahl <elliot.pahl@gmail.com>
elsanli(李楠) <elsanli@tencent.com>
Elton Stoneman <elton@sixeyed.com>
Emmanuel Briney <emmanuel.briney@docker.com>
Eng Zer Jun <engzerjun@gmail.com>
Eohyung Lee <liquidnuker@gmail.com>
Eric Yang <windfarer@gmail.com>
Erica Windisch <erica@windisch.us>
Erik Hollensbe <github@hollensbe.org>
Etki <etki@etki.me>
Eugene Lubarsky <eug48@users.noreply.github.com>
eyjhb <eyjhbb@gmail.com>
eyjhbb@gmail.com <eyjhbb@gmail.com>
Fabio Berchtold <jamesclonk@jamesclonk.ch>
Fabio Falci <fabiofalci@gmail.com>
Fabio Huser <fabio@fh1.ch>
farmerworking <farmerworking@gmail.com>
fate-grand-order <chenjg@harmonycloud.cn>
Felix Bünemann <buenemann@louis.info>
Felix Yan <felixonmars@archlinux.org>
Feng Honglin <tifayuki@gmail.com>
Fernando Mayo Fernandez <fernando@undefinedlabs.com>
Flavian Missi <fmissi@redhat.com>
Florentin Raud <florentin.raud@gmail.com>
forkbomber <forkbomber@users.noreply.github.com>
Frank Chen <frankchn@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Gabor Nagy <mail@aigeruth.hu>
gabriell nascimento <gabriell@bluesoft.com.br>
Gaetan <gdevillele@gmail.com>
gary schaetz <gary@schaetzkc.com>
gbarr01 <gwendolynne.barr@docker.com>
Geoffrey Hausheer <rc2012@pblue.org>
ghodsizadeh <mehdi.ghodsizadeh@gmail.com>
Giovanni Toraldo <giovanni.toraldo@eng.it>
Gladkov Alexey <agladkov@redhat.com>
Gleb M Borisov <borisov.gleb@gmail.com>
Gleb Schukin <gschukin@ptsecurity.com>
glefloch <glfloch@gmail.com>
Glyn Owen Hanmer <1295698+glynternet@users.noreply.github.com>
gotgelf <gotgelf@gmail.com>
Grachev Mikhail <work@mgrachev.com>
Grant Watters <grant.watters@docker.com>
Greg Rebholz <gregrebholz@gmail.com>
Guillaume J. Charmes <charmes.guillaume@gmail.com>
Guillaume Rose <guillaume.rose@docker.com>
Gábor Lipták <gliptak@gmail.com>
harche <p.harshal@gmail.com>
hasheddan <georgedanielmangum@gmail.com>
Hayley Swimelar <hswimelar@gmail.com>
Helen-xie <xieyulin821@harmonycloud.cn>
Henri Gomez <henri.gomez@gmail.com>
Honglin Feng <tifayuki@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Huu Nguyen <whoshuu@gmail.com>
ialidzhikov <i.alidjikov@gmail.com>
Ian Babrou <ibobrik@gmail.com>
iasoon <ilion.beyst@gmail.com>
igayoso <igayoso@gmail.com>
Igor Dolzhikov <bluesriverz@gmail.com>
Igor Morozov <igmorv@gmail.com>
Ihor Dvoretskyi <ihor@linux.com>
Ilion Beyst <ilion.beyst@gmail.com>
Ina Panova <ipanova@redhat.com>
Irene Diez <idiez@redhat.com>
Ismail Alidzhikov <i.alidjikov@gmail.com>
Jack Baines <jack.baines@uk.ibm.com>
Jack Griffin <jackpg14@gmail.com>
Jacob Atzen <jatzen@gmail.com>
Jake Moshenko <jake@devtable.com>
Jakob Ackermann <das7pad@outlook.com>
Jakub Mikulas <jakub@mikul.as>
James Findley <jfindley@fastmail.com>
James Hewitt <james.hewitt@uk.ibm.com>
James Lal <james@lightsofapollo.com>
Jason Freidman <jason.freidman@gmail.com>
Jason Heiss <jheiss@aput.net>
Javier Palomo Almena <javier.palomo.almena@gmail.com>
jdolitsky <393494+jdolitsky@users.noreply.github.com>
Jeff Nickoloff <jeff@allingeek.com>
Jeffrey van Gogh <jvg@google.com>
jerae-duffin <83294991+jerae-duffin@users.noreply.github.com>
Jeremy THERIN <jtherin@scaleway.com>
Jesse Brown <jabrown85@gmail.com>
Jesse Haka <haka.jesse@gmail.com>
Jessica Frazelle <jess@oxide.computer>
jhaohai <jhaohai@foxmail.com>
Jianqing Wang <tsing@jianqing.org>
Jihoon Chung <jihoon@gmail.com>
Jim Galasyn <jim.galasyn@docker.com>
Joao Fernandes <joaofnfernandes@gmail.com>
Joffrey F <joffrey@docker.com>
Johan Euphrosine <proppy@google.com>
John Howard <github@lowenna.com>
John Mulhausen <john@docker.com>
John Starks <jostarks@microsoft.com>
Jon Johnson <jonjohnson@google.com>
Jon Poler <jonathan.poler@apcera.com>
Jonas Hecht <jonas.hecht@codecentric.de>
Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Lee <jonjohn1232009@gmail.com>
Jonathan Rudenberg <jonathan@titanous.com>
Jordan Liggitt <jliggitt@redhat.com>
Jose D. Gomez R <jose.gomez@suse.com>
Josh Chorlton <josh.chorlton@docker.com>
Josh Dolitsky <josh@dolit.ski>
Josh Hawn <jlhawn@berkeley.edu>
Josiah Kiehl <jkiehl@riotgames.com>
Joyce Brum <joycebrumu.u@gmail.com>
João Pereira <484633+joaodrp@users.noreply.github.com>
Julien Bordellier <1444415+jstoja@users.noreply.github.com>
Julien Fernandez <julien.fernandez@gmail.com>
Justas Brazauskas <brazauskasjustas@gmail.com>
Justin Cormack <justin.cormack@docker.com>
Justin I. Nevill <JustinINevill@users.noreply.github.com>
Justin Santa Barbara <justin@fathomdb.com>
kaiwentan <kaiwentan@harmonycloud.cn>
Ke Xu <leonhartx.k@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Ken Cochrane <KenCochrane@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Kevin Lin <kevin@kelda.io>
Kevin Robatel <kevinrob2@gmail.com>
Kira <me@imkira.com>
Kirat Singh <kirat.singh@gmail.com>
L-Hudson <44844738+L-Hudson@users.noreply.github.com>
Lachlan Cooper <lachlancooper@gmail.com>
Laura Brehm <laurabrehm@hey.com>
Lei Jitang <leijitang@huawei.com>
Lenny Linux <tippexs91@googlemail.com>
Leonardo Azize Martins <lazize@users.noreply.github.com>
leonstrand <leonstrand@gmail.com>
Li Yi <denverdino@gmail.com>
Liam White <liamwhite@uk.ibm.com>
libo.huang <huanglibo2010@gmail.com>
LingFaKe <lingfake@huawei.com>
Liron Levin <liron@twistlock.com>
lisong <lisong@cdsunrise.net>
Littlemoon917 <18084421+Littlemoon917@users.noreply.github.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
liyongxin <yxli@alauda.io>
Lloyd Ramey <lnr0626@gmail.com>
lostsquirrel <lostsquirreli@hotmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luca Bruno <lucab@debian.org>
Lucas França de Oliveira <lucasfdo@palantir.com>
Lucas Santos <lhs.santoss@gmail.com>
Luis Lobo Borobia <luislobo@gmail.com>
Luke Carpenter <x@rubynerd.net>
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
Makoto Oda <truth_jp_4133@yahoo.co.jp>
mallchin <mallchin@mac.com>
Manish Tomar <manish.tomar@docker.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Martins <marcus@docker.com>
Maria Bermudez <bermudez.mt@gmail.com>
Mark Sagi-Kazar <mark.sagikazar@gmail.com>
Mary Anthony <mary@docker.com>
Masataka Mizukoshi <m.mizukoshi.wakuwaku@gmail.com>
Matin Rahmanian <itsmatinx@gmail.com>
MATSUMOTO TAKEAKI <takeaki.matsumoto@linecorp.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Linville <matt@linville.me>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Matt Tescher <matthew.tescher@docker.com>
Matthew Balvanz <matthew.balvanz@workiva.com>
Matthew Green <greenmr@live.co.uk>
Matthew Riley <mattdr@google.com>
Maurice Sotzny <ailuridae@users.noreply.github.com>
Meaglith Ma <genedna@gmail.com>
Michael Bonfils <bonfils.michael@protonmail.com>
Michael Crosby <crosbymichael@gmail.com>
Michael Prokop <mika@grml.org>
Michael Vetter <jubalh@iodoru.org>
Michal Fojtik <mfojtik@redhat.com>
Michal Gebauer <mishak@mishak.net>
Michal Guerquin <michalg@allenai.org>
Michal Minar <miminar@redhat.com>
Mike Brown <brownwm@us.ibm.com>
Mike Lundy <mike@fluffypenguin.org>
Mike Truman <miketruman42@gmail.com>
Milos Gajdos <milosthegajdos@gmail.com>
Miquel Sabaté <msabate@suse.com>
mlmhl <409107750@qq.com>
Monika Katiyar <monika@jeavio.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
mqliang <mqliang.zju@gmail.com>
Muesli <solom.emmanuel@gmail.com>
Nan Monnand Deng <monnand@gmail.com>
Nat Zimmermann <ntzm@users.noreply.github.com>
Nathan Sullivan <nathan@nightsys.net>
Naveed Jamil <naveed.jamil@tenpearl.com>
Neil Wilson <neil@aldur.co.uk>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nicolas De Loof <nicolas.deloof@gmail.com>
Nikita Tarasov <nikita@mygento.ru>
ning xie <andy.xning@gmail.com>
Nishant Totla <nishanttotla@gmail.com>
Noah Treuhaft <noah.treuhaft@docker.com>
Novak Ivanovski <novakivanovski@gmail.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Nycholas de Oliveira e Oliveira <nycholas@gmail.com>
Oilbeater <liumengxinfly@gmail.com>
Oleg Bulatov <oleg@bulatov.me>
olegburov <oleg.burov@outlook.com>
Olivier <o+github@gambier.email>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
ollypom <oppomeroy@gmail.com>
Omer Cohen <git@omer.io>
Oscar Caballero <ocaballero@opensistemas.com>
Owen W. Taylor <otaylor@fishsoup.net>
paigehargrave <Paige.hargrave@docker.com>
Parth Mehrotra <parth@mehrotra.me>
Pascal Borreli <pascal@borreli.com>
Patrick Devine <patrick.devine@docker.com>
Patrick Easters <peasters@redhat.com>
Paul Cacheux <paul.cacheux@datadoghq.com>
Pavel Antonov <ddc67cd@gmail.com>
Paweł Gronowski <pawel.gronowski@docker.com>
Per Lundberg <perlun@gmail.com>
Peter Choi <reikani@Peters-MacBook-Pro.local>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Kokot <peterkokot@gmail.com>
Phil Estes <estesp@gmail.com>
Philip Misiowiec <philip@atlashealth.com>
Pierre-Yves Ritschard <pyr@spootnik.org>
Pieter Scheffers <pieter.scheffers@gmail.com>
Qiang Huang <h.huangqiang@huawei.com>
Qiao Anran <qiaoanran@gmail.com>
Radon Rosborough <radon.neon@gmail.com>
Randy Barlow <randy@electronsweatshop.com>
Raphaël Enrici <raphael@root-42.com>
Ricardo Maraschini <ricardo.maraschini@gmail.com>
Richard Scothern <richard.scothern@gmail.com>
Rick Wieman <git@rickw.nl>
Rik Nijessen <rik@keefo.nl>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rober Morales-Chaparro <rober.morales@rstor.io>
Robert Kaussow <mail@geeklabor.de>
Robert Steward <speaktorob@users.noreply.github.com>
Roberto G. Hashioka <roberto.hashioka@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
ROY <qqbuby@gmail.com>
Rui Cao <ruicao@alauda.io>
ruicao <ruicao@alauda.io>
Rusty Conover <rusty@luckydinosaur.com>
Ryan Abrams <rdabrams@gmail.com>
Ryan Thomas <rthomas@atlassian.com>
sakeven <jc5930@sina.cn>
Sam Alba <sam.alba@gmail.com>
Samuel Karp <skarp@amazon.com>
sangluo <sangluo@pinduoduo.com>
Santiago Torres <torresariass@gmail.com>
Sargun Dhillon <sargun@sargun.me>
sayboras <sayboras@yahoo.com>
Sean Boran <Boran@users.noreply.github.com>
Sean P. Kane <spkane00@gmail.com>
Sebastiaan van Stijn <github@gone.nl>
Sebastien Coavoux <s.coavoux@free.fr>
Serge Dubrouski <sergeyfd@gmail.com>
Sevki Hasirci <sevki@cloudflare.com>
Sharif Nassar <sharif@mrwacky.com>
Shawn Chen <chen8132@gmail.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shawnpku <chen8132@gmail.com>
Shengjing Zhu <zhsj@debian.org>
Shiela M Parker <smp13@live.com>
Shishir Mahajan <shishir.mahajan@redhat.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>
Simon <crydotsnakegithub@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Simone Locci <simone.locci@eng.it>
Smasherr <soundcracker@gmail.com>
Solomon Hykes <solomon@dagger.io>
Sora Morimoto <sora@morimoto.io>
spacexnice <yaoyao.xyy@alibaba-inc.com>
Spencer Rinehart <anubis@overthemonkey.com>
srajmane <31947381+srajmane@users.noreply.github.com>
Srini Brahmaroutu <srbrahma@us.ibm.com>
Stan Hu <stanhu@gmail.com>
Stefan Lörwald <10850250+stefanloerwald@users.noreply.github.com>
Stefan Majewsky <stefan.majewsky@sap.com>
Stefan Nica <snica@suse.com>
Stefan Weil <sw@weilnetz.de>
Stephen Day <stevvooe@gmail.com>
Steve Lasker <stevenlasker@hotmail.com>
Steven Hanna <stevenhanna6@gmail.com>
Steven Kalt <SKalt@users.noreply.github.com>
Steven Taylor <steven.taylor@me.com>
stonezdj <stonezdj@gmail.com>
sun jian <cnhttpd@gmail.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
syntaxkim <40621244+syntaxkim@users.noreply.github.com>
T N <tnir@users.noreply.github.com>
t-eimizu <t-eimizu@aim.ac>
Tariq Ibrahim <tariq181290@gmail.com>
TaylorKanper <tony_kanper@hotmail.com>
Ted Reed <ted.reed@gmail.com>
Terin Stock <terinjokes@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Berger <loki@lokis-chaos.de>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
tifayuki <tifayuki@gmail.com>
Tiger Kaovilai <tkaovila@redhat.com>
Tobias Fuhrimann <mastertinner@users.noreply.github.com>
Tobias Schwab <tobias.schwab@dynport.de>
Tom Hayward <thayward@infoblox.com>
Tom Hu <tomhu1096@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Holdstock-Brown <tony@docker.com>
Tosone <i@tosone.cn>
Trapier Marshall <trapier@users.noreply.github.com>
Trevor Pounds <trevor.pounds@gmail.com>
Trevor Wood <Trevor.G.Wood@gmail.com>
Troels Thomsen <troels@thomsen.io>
uhayate <uhayate.gong@daocloud.io>
Usha Mandya <47779042+usha-mandya@users.noreply.github.com>
Usha Mandya <usha.mandya@docker.com>
Vaidas Jablonskis <jablonskis@gmail.com>
Vega Chou <VegeChou@users.noreply.github.com>
Veres Lajos <vlajos@gmail.com>
Victor Vieux <victorvieux@gmail.com>
Victoria Bialas <victoria.bialas@docker.com>
Vidar <vl@ez.no>
Viktor Stanchev <me@viktorstanchev.com>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent.demeester@docker.com>
Vincent Giersch <vincent@giersch.fr>
Vishesh Jindal <vishesh92@gmail.com>
W. Trevor King <wking@tremily.us>
Wang Jie <wangjie5@chinaskycloud.com>
Wang Yan <wangyan@vmware.com>
Wassim Dhif <wassimdhif@gmail.com>
wayne <wayne.warren.s@gmail.com>
Wei Fu <fuweid89@gmail.com>
Wei Meng <wemeng@microsoft.com>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
Wen-Quan Li <legendarilylwq@gmail.com>
Wenkai Yin <yinw@vmware.com>
william wei <1342247033@qq.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Xueshan Feng <xueshan.feng@gmail.com>
Yann ROBERT <yann.robert@anantaplex.fr>
Yannick Fricke <YannickFricke@users.noreply.github.com>
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
yixi zhang <yixi@memsql.com>
Yong Tang <yong.tang.github@outlook.com>
Yong Wen Chua <lawliet89@users.noreply.github.com>
Yongxin Li <yxli@alauda.io>
Yu Wang <yuwa@microsoft.com>
yuexiao-wang <wang.yuexiao@zte.com.cn>
|
||||||
YuJie <390282283@qq.com>
|
|
||||||
yuzou <zouyu7@huawei.com>
|
|
||||||
Zhang Wei <zhangwei555@huawei.com>
|
|
||||||
zhipengzuo <zuozhipeng@baidu.com>
|
|
||||||
zhouhaibing089 <zhouhaibing089@gmail.com>
|
|
||||||
zounengren <zounengren@cmss.chinamobile.com>
|
|
||||||
姜继忠 <jizhong.jiangjz@alibaba-inc.com>
|
|
10 Dockerfile
@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1

-ARG GO_VERSION=1.22.5
-ARG ALPINE_VERSION=3.20
+ARG GO_VERSION=1.21.5
+ARG ALPINE_VERSION=3.18
 ARG XX_VERSION=1.2.1

 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
@ -16,7 +16,7 @@ FROM base AS version
 ARG PKG=github.com/distribution/distribution/v3
 RUN --mount=target=. \
     VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
-    echo "-X ${PKG}/version.version=${VERSION#v} -X ${PKG}/version.revision=${REVISION} -X ${PKG}/version.mainpkg=${PKG}" | tee /tmp/.ldflags; \
+    echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
     echo -n "${VERSION}" | tee /tmp/.version;

 FROM base AS build
@ -52,9 +52,9 @@ COPY --from=releaser /out /

 FROM alpine:${ALPINE_VERSION}
 RUN apk add --no-cache ca-certificates
-COPY cmd/registry/config-dev.yml /etc/distribution/config.yml
+COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
 COPY --from=binary /registry /bin/registry
 VOLUME ["/var/lib/registry"]
 EXPOSE 5000
 ENTRYPOINT ["registry"]
-CMD ["serve", "/etc/distribution/config.yml"]
+CMD ["serve", "/etc/docker/registry/config.yml"]
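A quick way to see the effect of the renamed ldflags variables and the moved config path is to build the image and query the binary. A minimal sketch, assuming the local tag `registry:local` and that `--version` prints the value injected via `-X .../version.Version`:

```bash
# Build the image from the repository root
docker build -t registry:local .

# The entrypoint is the registry binary, so this runs `registry --version`
docker run --rm registry:local --version

# Default config path baked into the image on the tcl/master side of this diff
docker run --rm --entrypoint cat registry:local /etc/docker/registry/config.yml
```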
10
Makefile
10
Makefile
|
@ -37,7 +37,7 @@ WHALE = "+"
|
||||||
TESTFLAGS_RACE=
|
TESTFLAGS_RACE=
|
||||||
GOFILES=$(shell find . -type f -name '*.go')
|
GOFILES=$(shell find . -type f -name '*.go')
|
||||||
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
|
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
|
||||||
GO_LDFLAGS=-ldflags '-extldflags "-Wl,-z,now" -s -w -X $(PKG)/version.version=$(VERSION) -X $(PKG)/version.revision=$(REVISION) -X $(PKG)/version.mainpkg=$(PKG) $(EXTRA_LDFLAGS)'
|
GO_LDFLAGS=-ldflags '-extldflags "-Wl,-z,now" -s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
|
||||||
|
|
||||||
BINARIES=$(addprefix bin/,$(COMMANDS))
|
BINARIES=$(addprefix bin/,$(COMMANDS))
|
||||||
|
|
||||||
|
@ -45,7 +45,7 @@ BINARIES=$(addprefix bin/,$(COMMANDS))
|
||||||
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
|
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
|
||||||
TESTFLAGS_PARALLEL ?= 8
|
TESTFLAGS_PARALLEL ?= 8
|
||||||
|
|
||||||
.PHONY: all build binaries clean test test-race test-full integration test-coverage validate lint validate-git validate-vendor vendor mod-outdated image validate-authors authors
|
.PHONY: all build binaries clean test test-race test-full integration test-coverage validate lint validate-git validate-vendor vendor mod-outdated image
|
||||||
.DEFAULT: all
|
.DEFAULT: all
|
||||||
|
|
||||||
.PHONY: FORCE
|
.PHONY: FORCE
|
||||||
|
@ -86,9 +86,6 @@ vendor: ## update vendor
|
||||||
mod-outdated: ## check outdated dependencies
|
mod-outdated: ## check outdated dependencies
|
||||||
docker buildx bake $@
|
docker buildx bake $@
|
||||||
|
|
||||||
authors: ## generate authors
|
|
||||||
docker buildx bake $@
|
|
||||||
|
|
||||||
##@ Test
|
##@ Test
|
||||||
|
|
||||||
test: ## run tests, except integration test with test.short
|
test: ## run tests, except integration test with test.short
|
||||||
|
@ -175,9 +172,6 @@ validate-git: ## validate git
|
||||||
validate-vendor: ## validate vendor
|
validate-vendor: ## validate vendor
|
||||||
docker buildx bake $@
|
docker buildx bake $@
|
||||||
|
|
||||||
validate-authors: ## validate authors
|
|
||||||
docker buildx bake $@
|
|
||||||
|
|
||||||
.PHONY: help
|
.PHONY: help
|
||||||
help:
|
help:
|
||||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
<img style="align: center; padding-left: 10px; padding-right: 10px; padding-bottom: 10px;" width="238px" height="238px" src="./distribution-logo.svg" />
|
<img style="align: center; padding-left: 10px; padding-right: 10px; padding-bottom: 10px;" width="238px" height="238px" src="./distribution-logo.svg" />
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
[![Build Status](https://github.com/distribution/distribution/workflows/build/badge.svg?branch=main&event=push)](https://github.com/distribution/distribution/actions/workflows/build.yml?query=workflow%3Abuild)
|
[![Build Status](https://github.com/distribution/distribution/workflows/CI/badge.svg?branch=main&event=push)](https://github.com/distribution/distribution/actions?query=workflow%3ACI)
|
||||||
[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/distribution)
|
[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/distribution)
|
||||||
[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE)
|
[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE)
|
||||||
[![codecov](https://codecov.io/gh/distribution/distribution/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/distribution)
|
[![codecov](https://codecov.io/gh/distribution/distribution/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/distribution)
|
||||||
|
@ -27,7 +27,7 @@ This repository contains the following components:
|
||||||
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| **registry** | An implementation of the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). |
|
| **registry** | An implementation of the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). |
|
||||||
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://pkg.go.dev/github.com/distribution/distribution) for details. **Note**: The interfaces for these libraries are **unstable**. |
|
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://pkg.go.dev/github.com/distribution/distribution) for details. **Note**: The interfaces for these libraries are **unstable**. |
|
||||||
| **documentation** | Full documentation is available at [https://distribution.github.io/distribution](https://distribution.github.io/distribution/). |
|
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
|
||||||
|
|
||||||
### How does this integrate with Docker, containerd, and other OCI clients?
|
||||||
|
|
||||||
|
|
15
blobs.go
15
blobs.go
|
@ -85,6 +85,15 @@ type Descriptor struct {
|
||||||
// depend on the simplicity of this type.
|
// depend on the simplicity of this type.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Descriptor returns the descriptor, to make it satisfy the Describable
|
||||||
|
// interface. Note that implementations of Describable are generally objects
|
||||||
|
// which can be described, not simply descriptors; this exception is in place
|
||||||
|
// to make it more convenient to pass actual descriptors to functions that
|
||||||
|
// expect Describable objects.
|
||||||
|
func (d Descriptor) Descriptor() Descriptor {
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
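The comment above refers to the package's `Describable` interface; with this method a bare `Descriptor` can be passed wherever a `Describable` is expected. A minimal sketch of that usage (the helper below is illustrative, not part of the diff):

```go
package example

import (
	"github.com/distribution/distribution/v3"
	"github.com/opencontainers/go-digest"
)

// digestOf accepts anything that can describe itself; a plain Descriptor now
// qualifies because its Descriptor() method simply returns the receiver.
func digestOf(d distribution.Describable) digest.Digest {
	return d.Descriptor().Digest
}
```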
// BlobStatter makes blob descriptors available by digest. The service may
|
// BlobStatter makes blob descriptors available by digest. The service may
|
||||||
// provide a descriptor of a different digest if the provided digest is not
|
// provide a descriptor of a different digest if the provided digest is not
|
||||||
// canonical.
|
// canonical.
|
||||||
|
@ -131,6 +140,12 @@ type BlobDescriptorServiceFactory interface {
|
||||||
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
|
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadSeekCloser is the primary reader type for blob data, combining
|
||||||
|
// io.ReadSeeker with io.Closer.
|
||||||
|
//
|
||||||
|
// Deprecated: use [io.ReadSeekCloser].
|
||||||
|
type ReadSeekCloser = io.ReadSeekCloser
|
||||||
|
|
||||||
// BlobProvider describes operations for getting blob data.
|
// BlobProvider describes operations for getting blob data.
|
||||||
type BlobProvider interface {
|
type BlobProvider interface {
|
||||||
// Get returns the entire blob identified by digest along with the descriptor.
|
// Get returns the entire blob identified by digest along with the descriptor.
|
||||||
|
|
|
@ -12,8 +12,6 @@ storage:
|
||||||
maintenance:
|
maintenance:
|
||||||
uploadpurging:
|
uploadpurging:
|
||||||
enabled: false
|
enabled: false
|
||||||
tag:
|
|
||||||
concurrencylimit: 8
|
|
||||||
http:
|
http:
|
||||||
addr: :5000
|
addr: :5000
|
||||||
secret: asecretforlocaldevelopment
|
secret: asecretforlocaldevelopment
|
||||||
|
@ -22,10 +20,11 @@ http:
|
||||||
headers:
|
headers:
|
||||||
X-Content-Type-Options: [nosniff]
|
X-Content-Type-Options: [nosniff]
|
||||||
redis:
|
redis:
|
||||||
addrs: [localhost:6379]
|
addr: localhost:6379
|
||||||
maxidleconns: 16
|
pool:
|
||||||
poolsize: 64
|
maxidle: 16
|
||||||
connmaxidletime: 300s
|
maxactive: 64
|
||||||
|
idletimeout: 300s
|
||||||
dialtimeout: 10ms
|
dialtimeout: 10ms
|
||||||
readtimeout: 10ms
|
readtimeout: 10ms
|
||||||
writetimeout: 10ms
|
writetimeout: 10ms
|
||||||
|
|
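For reference, the same development Redis settings in both layouts (a sketch; the values mirror the ones shown in this hunk — upstream main maps the keys straight onto go-redis `UniversalOptions`, while tcl/master keeps the older `pool` block):

```yaml
# main
redis:
  addrs: [localhost:6379]
  maxidleconns: 16
  poolsize: 64
  connmaxidletime: 300s
---
# tcl/master
redis:
  addr: localhost:6379
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
```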
99
cmd/registry/config-dev-frostfs.yml
Normal file
99
cmd/registry/config-dev-frostfs.yml
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
version: 0.1
|
||||||
|
log:
|
||||||
|
level: debug
|
||||||
|
fields:
|
||||||
|
service: registry
|
||||||
|
environment: development
|
||||||
|
hooks:
|
||||||
|
- type: mail
|
||||||
|
disabled: true
|
||||||
|
levels:
|
||||||
|
- panic
|
||||||
|
options:
|
||||||
|
smtp:
|
||||||
|
addr: mail.example.com:25
|
||||||
|
username: mailuser
|
||||||
|
password: password
|
||||||
|
insecure: true
|
||||||
|
from: sender@example.com
|
||||||
|
to:
|
||||||
|
- errors@example.com
|
||||||
|
storage:
|
||||||
|
delete:
|
||||||
|
enabled: true
|
||||||
|
cache:
|
||||||
|
blobdescriptor: inmemory
|
||||||
|
maintenance:
|
||||||
|
uploadpurging:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
frostfs:
|
||||||
|
wallet:
|
||||||
|
path: /path/to/wallet.json
|
||||||
|
password: ""
|
||||||
|
peers:
|
||||||
|
0:
|
||||||
|
address: s01.frostfs.devenv:8080
|
||||||
|
weight: 1
|
||||||
|
priority: 1
|
||||||
|
1:
|
||||||
|
address: s02.frostfs.devenv:8080
|
||||||
|
weight: 1
|
||||||
|
priority: 1
|
||||||
|
2:
|
||||||
|
address: s03.frostfs.devenv:8080
|
||||||
|
weight: 1
|
||||||
|
priority: 1
|
||||||
|
3:
|
||||||
|
address: s04.frostfs.devenv:8080
|
||||||
|
weight: 1
|
||||||
|
priority: 1
|
||||||
|
# container can be nicename (rpc_endpoint is required)
|
||||||
|
container: ChzA3qeJHbAT2nyo35LofdJ7jMqVuT9h3WoRpxHRn9Uq
|
||||||
|
# the following params are optional
|
||||||
|
session_expiration_duration: 1000 # in blocks
|
||||||
|
connection_timeout: 5s
|
||||||
|
request_timeout: 5s
|
||||||
|
rebalance_interval: 30s
|
||||||
|
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
||||||
|
http:
|
||||||
|
addr: :5000
|
||||||
|
debug:
|
||||||
|
addr: :5001
|
||||||
|
prometheus:
|
||||||
|
enabled: true
|
||||||
|
path: /metrics
|
||||||
|
headers:
|
||||||
|
X-Content-Type-Options: [ nosniff ]
|
||||||
|
redis:
|
||||||
|
addr: localhost:6379
|
||||||
|
pool:
|
||||||
|
maxidle: 16
|
||||||
|
maxactive: 64
|
||||||
|
idletimeout: 300s
|
||||||
|
dialtimeout: 10ms
|
||||||
|
readtimeout: 10ms
|
||||||
|
writetimeout: 10ms
|
||||||
|
notifications:
|
||||||
|
events:
|
||||||
|
includereferences: true
|
||||||
|
endpoints:
|
||||||
|
- name: local-5003
|
||||||
|
url: http://localhost:5003/callback
|
||||||
|
headers:
|
||||||
|
Authorization: [ Bearer <an example token> ]
|
||||||
|
timeout: 1s
|
||||||
|
threshold: 10
|
||||||
|
backoff: 1s
|
||||||
|
disabled: true
|
||||||
|
- name: local-8083
|
||||||
|
url: http://localhost:8083/callback
|
||||||
|
timeout: 1s
|
||||||
|
threshold: 10
|
||||||
|
backoff: 1s
|
||||||
|
disabled: true
|
||||||
|
health:
|
||||||
|
storagedriver:
|
||||||
|
enabled: true
|
||||||
|
interval: 30s
|
||||||
|
threshold: 3
|
|
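A minimal sketch of running the registry against this new development config (the binary name and the `serve` subcommand come from the Dockerfile above; the wallet path inside the file is a placeholder you would replace):

```bash
# Assumes a built registry binary and a reachable FrostFS devenv
registry serve cmd/registry/config-dev-frostfs.yml
```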
@ -14,8 +14,6 @@ storage:
|
||||||
maintenance:
|
maintenance:
|
||||||
uploadpurging:
|
uploadpurging:
|
||||||
enabled: false
|
enabled: false
|
||||||
tag:
|
|
||||||
concurrencylimit: 8
|
|
||||||
http:
|
http:
|
||||||
addr: :5000
|
addr: :5000
|
||||||
debug:
|
debug:
|
||||||
|
|
|
@ -7,8 +7,6 @@ storage:
|
||||||
blobdescriptor: inmemory
|
blobdescriptor: inmemory
|
||||||
filesystem:
|
filesystem:
|
||||||
rootdirectory: /var/lib/registry
|
rootdirectory: /var/lib/registry
|
||||||
tag:
|
|
||||||
concurrencylimit: 8
|
|
||||||
http:
|
http:
|
||||||
addr: :5000
|
addr: :5000
|
||||||
headers:
|
headers:
|
||||||
|
|
|
@ -10,11 +10,11 @@ import (
|
||||||
_ "github.com/distribution/distribution/v3/registry/proxy"
|
_ "github.com/distribution/distribution/v3/registry/proxy"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/azure"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/azure"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
||||||
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/frostfs"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/gcs"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/gcs"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/cloudfront"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/cloudfront"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/redirect"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/redirect"
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/middleware/rewrite"
|
|
||||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
|
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -8,8 +8,6 @@ import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/redis/go-redis/v9"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
|
// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
|
||||||
|
@ -159,15 +157,9 @@ type Configuration struct {
|
||||||
// HTTP2 configuration options
|
// HTTP2 configuration options
|
||||||
HTTP2 struct {
|
HTTP2 struct {
|
||||||
// Specifies whether the registry should disallow clients attempting
|
// Specifies whether the registry should disallow clients attempting
|
||||||
// to connect via HTTP/2. If set to true, only HTTP/1.1 is supported.
|
// to connect via http2. If set to true, only http/1.1 is supported.
|
||||||
Disabled bool `yaml:"disabled,omitempty"`
|
Disabled bool `yaml:"disabled,omitempty"`
|
||||||
} `yaml:"http2,omitempty"`
|
} `yaml:"http2,omitempty"`
|
||||||
|
|
||||||
H2C struct {
|
|
||||||
// Enables H2C (HTTP/2 Cleartext). Enable to support HTTP/2 without needing to configure TLS
|
|
||||||
// Useful when deploying the registry behind a load balancer (e.g. Cloud Run)
|
|
||||||
Enabled bool `yaml:"enabled,omitempty"`
|
|
||||||
} `yaml:"h2c,omitempty"`
|
|
||||||
} `yaml:"http,omitempty"`
|
} `yaml:"http,omitempty"`
|
||||||
|
|
||||||
// Notifications specifies configuration about various endpoint to which
|
// Notifications specifies configuration about various endpoint to which
|
||||||
|
@ -183,7 +175,25 @@ type Configuration struct {
|
||||||
Proxy Proxy `yaml:"proxy,omitempty"`
|
Proxy Proxy `yaml:"proxy,omitempty"`
|
||||||
|
|
||||||
// Validation configures validation options for the registry.
|
// Validation configures validation options for the registry.
|
||||||
Validation Validation `yaml:"validation,omitempty"`
|
Validation struct {
|
||||||
|
// Enabled enables the other options in this section. This field is
|
||||||
|
// deprecated in favor of Disabled.
|
||||||
|
Enabled bool `yaml:"enabled,omitempty"`
|
||||||
|
// Disabled disables the other options in this section.
|
||||||
|
Disabled bool `yaml:"disabled,omitempty"`
|
||||||
|
// Manifests configures manifest validation.
|
||||||
|
Manifests struct {
|
||||||
|
// URLs configures validation for URLs in pushed manifests.
|
||||||
|
URLs struct {
|
||||||
|
// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
|
||||||
|
// that URLs in pushed manifests must match.
|
||||||
|
Allow []string `yaml:"allow,omitempty"`
|
||||||
|
// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
|
||||||
|
// that URLs in pushed manifests must not match.
|
||||||
|
Deny []string `yaml:"deny,omitempty"`
|
||||||
|
} `yaml:"urls,omitempty"`
|
||||||
|
} `yaml:"manifests,omitempty"`
|
||||||
|
} `yaml:"validation,omitempty"`
|
||||||
|
|
||||||
// Policy configures registry policy options.
|
// Policy configures registry policy options.
|
||||||
Policy struct {
|
Policy struct {
|
||||||
|
@ -261,6 +271,44 @@ type FileChecker struct {
|
||||||
Threshold int `yaml:"threshold,omitempty"`
|
Threshold int `yaml:"threshold,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Redis configures the redis pool available to the registry webapp.
|
||||||
|
type Redis struct {
|
||||||
|
// Addr specifies the redis instance available to the application.
|
||||||
|
Addr string `yaml:"addr,omitempty"`
|
||||||
|
|
||||||
|
// Username can be used for finer-grained permission control, available since redis 6.0.
|
||||||
|
Username string `yaml:"username,omitempty"`
|
||||||
|
|
||||||
|
// Password string to use when making a connection.
|
||||||
|
Password string `yaml:"password,omitempty"`
|
||||||
|
|
||||||
|
// DB specifies the database to connect to on the redis instance.
|
||||||
|
DB int `yaml:"db,omitempty"`
|
||||||
|
|
||||||
|
// TLS configures settings for redis in-transit encryption
|
||||||
|
TLS struct {
|
||||||
|
Enabled bool `yaml:"enabled,omitempty"`
|
||||||
|
} `yaml:"tls,omitempty"`
|
||||||
|
|
||||||
|
DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect
|
||||||
|
ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data
|
||||||
|
WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data
|
||||||
|
|
||||||
|
// Pool configures the behavior of the redis connection pool.
|
||||||
|
Pool struct {
|
||||||
|
// MaxIdle sets the maximum number of idle connections.
|
||||||
|
MaxIdle int `yaml:"maxidle,omitempty"`
|
||||||
|
|
||||||
|
// MaxActive sets the maximum number of connections that should be
|
||||||
|
// opened before blocking a connection request.
|
||||||
|
MaxActive int `yaml:"maxactive,omitempty"`
|
||||||
|
|
||||||
|
// IdleTimeout sets the amount of time to wait before closing
|
||||||
|
// inactive connections.
|
||||||
|
IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
|
||||||
|
} `yaml:"pool,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
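A hedged YAML sketch of a `redis` section matching this struct (key names follow the yaml tags above; the credential values are placeholders taken from the test fixtures later in this diff):

```yaml
redis:
  addr: localhost:6379
  username: alice
  password: "123456"
  db: 1
  tls:
    enabled: true
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
  pool:
    maxidle: 16
    maxactive: 64
    idletimeout: 300s
```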
// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
|
// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
|
||||||
type HTTPChecker struct {
|
type HTTPChecker struct {
|
||||||
// Timeout is the duration to wait before timing out the HTTP request
|
// Timeout is the duration to wait before timing out the HTTP request
|
||||||
|
@ -312,13 +360,6 @@ type Health struct {
|
||||||
} `yaml:"storagedriver,omitempty"`
|
} `yaml:"storagedriver,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Platform struct {
|
|
||||||
// Architecture is the architecture for this platform
|
|
||||||
Architecture string `yaml:"architecture,omitempty"`
|
|
||||||
// OS is the operating system for this platform
|
|
||||||
OS string `yaml:"os,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// v0_1Configuration is a Version 0.1 Configuration struct
|
// v0_1Configuration is a Version 0.1 Configuration struct
|
||||||
// This is currently aliased to Configuration, as it is the current version
|
// This is currently aliased to Configuration, as it is the current version
|
||||||
type v0_1Configuration Configuration
|
type v0_1Configuration Configuration
|
||||||
|
@ -394,8 +435,6 @@ func (storage Storage) Type() string {
|
||||||
// allow configuration of delete
|
// allow configuration of delete
|
||||||
case "redirect":
|
case "redirect":
|
||||||
// allow configuration of redirect
|
// allow configuration of redirect
|
||||||
case "tag":
|
|
||||||
// allow configuration of tag
|
|
||||||
default:
|
default:
|
||||||
storageType = append(storageType, k)
|
storageType = append(storageType, k)
|
||||||
}
|
}
|
||||||
|
@ -409,19 +448,6 @@ func (storage Storage) Type() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// TagParameters returns the Parameters map for a Storage tag configuration
|
|
||||||
func (storage Storage) TagParameters() Parameters {
|
|
||||||
return storage["tag"]
|
|
||||||
}
|
|
||||||
|
|
||||||
// setTagParameter changes the parameter at the provided key to the new value
|
|
||||||
func (storage Storage) setTagParameter(key string, value interface{}) {
|
|
||||||
if _, ok := storage["tag"]; !ok {
|
|
||||||
storage["tag"] = make(Parameters)
|
|
||||||
}
|
|
||||||
storage["tag"][key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parameters returns the Parameters map for a Storage configuration
|
// Parameters returns the Parameters map for a Storage configuration
|
||||||
func (storage Storage) Parameters() Parameters {
|
func (storage Storage) Parameters() Parameters {
|
||||||
return storage[storage.Type()]
|
return storage[storage.Type()]
|
||||||
|
@ -450,8 +476,6 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
// allow configuration of delete
|
// allow configuration of delete
|
||||||
case "redirect":
|
case "redirect":
|
||||||
// allow configuration of redirect
|
// allow configuration of redirect
|
||||||
case "tag":
|
|
||||||
// allow configuration of tag
|
|
||||||
default:
|
default:
|
||||||
types = append(types, k)
|
types = append(types, k)
|
||||||
}
|
}
|
||||||
|
@ -606,62 +630,6 @@ type Proxy struct {
|
||||||
TTL *time.Duration `yaml:"ttl,omitempty"`
|
TTL *time.Duration `yaml:"ttl,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Validation struct {
|
|
||||||
// Enabled enables the other options in this section. This field is
|
|
||||||
// deprecated in favor of Disabled.
|
|
||||||
Enabled bool `yaml:"enabled,omitempty"`
|
|
||||||
// Disabled disables the other options in this section.
|
|
||||||
Disabled bool `yaml:"disabled,omitempty"`
|
|
||||||
// Manifests configures manifest validation.
|
|
||||||
Manifests ValidationManifests `yaml:"manifests,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ValidationManifests struct {
|
|
||||||
// URLs configures validation for URLs in pushed manifests.
|
|
||||||
URLs struct {
|
|
||||||
// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
|
|
||||||
// that URLs in pushed manifests must match.
|
|
||||||
Allow []string `yaml:"allow,omitempty"`
|
|
||||||
// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
|
|
||||||
// that URLs in pushed manifests must not match.
|
|
||||||
Deny []string `yaml:"deny,omitempty"`
|
|
||||||
} `yaml:"urls,omitempty"`
|
|
||||||
// ImageIndexes configures validation of image indexes
|
|
||||||
Indexes ValidationIndexes `yaml:"indexes,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ValidationIndexes struct {
|
|
||||||
// Platforms configures which platform images in an image index the validation applies to
|
|
||||||
Platforms Platforms `yaml:"platforms"`
|
|
||||||
// PlatformList filters the set of platforms to validate for image existence.
|
|
||||||
PlatformList []Platform `yaml:"platformlist,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Platforms configures which platform images in an image index the validation applies to
|
|
||||||
// This can be all, none, or list
|
|
||||||
type Platforms string
|
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface
|
|
||||||
// Unmarshals a string into a Platforms option, lowercasing the string and validating that it represents a
|
|
||||||
// valid option
|
|
||||||
func (platforms *Platforms) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|
||||||
var platformsString string
|
|
||||||
err := unmarshal(&platformsString)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
platformsString = strings.ToLower(platformsString)
|
|
||||||
switch platformsString {
|
|
||||||
case "all", "none", "list":
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid platforms option %s Must be one of [all, none, list]", platformsString)
|
|
||||||
}
|
|
||||||
|
|
||||||
*platforms = Platforms(platformsString)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
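For context, a sketch of how these validation options look in YAML on the main side (key names follow the yaml tags above; the URL regex and platform list mirror the docs example further down in this diff):

```yaml
validation:
  manifests:
    urls:
      allow:
        - ^https?://([^/]+\.)*example\.com/
    indexes:
      platforms: list
      platformlist:
        - architecture: amd64
          os: linux
```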
|
|
||||||
// Parse parses an input configuration yaml document into a Configuration struct
|
// Parse parses an input configuration yaml document into a Configuration struct
|
||||||
// This should generally be capable of handling old configuration format versions
|
// This should generally be capable of handling old configuration format versions
|
||||||
//
|
//
|
||||||
|
@ -714,172 +682,3 @@ func Parse(rd io.Reader) (*Configuration, error) {
|
||||||
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type RedisOptions = redis.UniversalOptions
|
|
||||||
|
|
||||||
type RedisTLSOptions struct {
|
|
||||||
Certificate string `yaml:"certificate,omitempty"`
|
|
||||||
Key string `yaml:"key,omitempty"`
|
|
||||||
ClientCAs []string `yaml:"clientcas,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Redis struct {
|
|
||||||
Options RedisOptions `yaml:",inline"`
|
|
||||||
TLS RedisTLSOptions `yaml:"tls,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Redis) MarshalYAML() (interface{}, error) {
|
|
||||||
fields := make(map[string]interface{})
|
|
||||||
|
|
||||||
val := reflect.ValueOf(c.Options)
|
|
||||||
typ := val.Type()
|
|
||||||
|
|
||||||
for i := 0; i < val.NumField(); i++ {
|
|
||||||
field := typ.Field(i)
|
|
||||||
fieldValue := val.Field(i)
|
|
||||||
|
|
||||||
// ignore funcs fields in redis.UniversalOptions
|
|
||||||
if fieldValue.Kind() == reflect.Func {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fields[strings.ToLower(field.Name)] = fieldValue.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add TLS fields if they're not empty
|
|
||||||
if c.TLS.Certificate != "" || c.TLS.Key != "" || len(c.TLS.ClientCAs) > 0 {
|
|
||||||
fields["tls"] = c.TLS
|
|
||||||
}
|
|
||||||
|
|
||||||
return fields, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Redis) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|
||||||
var fields map[string]interface{}
|
|
||||||
err := unmarshal(&fields)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
val := reflect.ValueOf(&c.Options).Elem()
|
|
||||||
typ := val.Type()
|
|
||||||
|
|
||||||
for i := 0; i < typ.NumField(); i++ {
|
|
||||||
field := typ.Field(i)
|
|
||||||
fieldName := strings.ToLower(field.Name)
|
|
||||||
|
|
||||||
if value, ok := fields[fieldName]; ok {
|
|
||||||
fieldValue := val.Field(i)
|
|
||||||
if fieldValue.CanSet() {
|
|
||||||
switch field.Type {
|
|
||||||
case reflect.TypeOf(time.Duration(0)):
|
|
||||||
durationStr, ok := value.(string)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("invalid duration value for field: %s", fieldName)
|
|
||||||
}
|
|
||||||
duration, err := time.ParseDuration(durationStr)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to parse duration for field: %s, error: %v", fieldName, err)
|
|
||||||
}
|
|
||||||
fieldValue.Set(reflect.ValueOf(duration))
|
|
||||||
default:
|
|
||||||
if err := setFieldValue(fieldValue, value); err != nil {
|
|
||||||
return fmt.Errorf("failed to set value for field: %s, error: %v", fieldName, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle TLS fields
|
|
||||||
if tlsData, ok := fields["tls"]; ok {
|
|
||||||
tlsMap, ok := tlsData.(map[interface{}]interface{})
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("invalid TLS data structure")
|
|
||||||
}
|
|
||||||
|
|
||||||
if cert, ok := tlsMap["certificate"]; ok {
|
|
||||||
var isString bool
|
|
||||||
c.TLS.Certificate, isString = cert.(string)
|
|
||||||
if !isString {
|
|
||||||
return fmt.Errorf("Redis TLS certificate must be a string")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if key, ok := tlsMap["key"]; ok {
|
|
||||||
var isString bool
|
|
||||||
c.TLS.Key, isString = key.(string)
|
|
||||||
if !isString {
|
|
||||||
return fmt.Errorf("Redis TLS (private) key must be a string")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if cas, ok := tlsMap["clientcas"]; ok {
|
|
||||||
caList, ok := cas.([]interface{})
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("invalid clientcas data structure")
|
|
||||||
}
|
|
||||||
for _, ca := range caList {
|
|
||||||
if caStr, ok := ca.(string); ok {
|
|
||||||
c.TLS.ClientCAs = append(c.TLS.ClientCAs, caStr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setFieldValue(field reflect.Value, value interface{}) error {
|
|
||||||
if value == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch field.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
stringValue, ok := value.(string)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("failed to convert value to string")
|
|
||||||
}
|
|
||||||
field.SetString(stringValue)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
intValue, ok := value.(int)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("failed to convert value to integer")
|
|
||||||
}
|
|
||||||
field.SetInt(int64(intValue))
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
uintValue, ok := value.(uint)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("failed to convert value to unsigned integer")
|
|
||||||
}
|
|
||||||
field.SetUint(uint64(uintValue))
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
floatValue, ok := value.(float64)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("failed to convert value to float")
|
|
||||||
}
|
|
||||||
field.SetFloat(floatValue)
|
|
||||||
case reflect.Bool:
|
|
||||||
boolValue, ok := value.(bool)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("failed to convert value to boolean")
|
|
||||||
}
|
|
||||||
field.SetBool(boolValue)
|
|
||||||
case reflect.Slice:
|
|
||||||
slice := reflect.MakeSlice(field.Type(), 0, 0)
|
|
||||||
valueSlice, ok := value.([]interface{})
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("failed to convert value to slice")
|
|
||||||
}
|
|
||||||
for _, item := range valueSlice {
|
|
||||||
sliceValue := reflect.New(field.Type().Elem()).Elem()
|
|
||||||
if err := setFieldValue(sliceValue, item); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
slice = reflect.Append(slice, sliceValue)
|
|
||||||
}
|
|
||||||
field.Set(slice)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unsupported field type: %v", field.Type())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -8,7 +8,6 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/redis/go-redis/v9"
|
|
||||||
"github.com/stretchr/testify/suite"
|
"github.com/stretchr/testify/suite"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
@ -40,9 +39,6 @@ var configStruct = Configuration{
|
||||||
"url1": "https://foo.example.com",
|
"url1": "https://foo.example.com",
|
||||||
"path1": "/some-path",
|
"path1": "/some-path",
|
||||||
},
|
},
|
||||||
"tag": Parameters{
|
|
||||||
"concurrencylimit": 10,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Auth: Auth{
|
Auth: Auth{
|
||||||
"silly": Parameters{
|
"silly": Parameters{
|
||||||
|
@ -101,9 +97,6 @@ var configStruct = Configuration{
|
||||||
HTTP2 struct {
|
HTTP2 struct {
|
||||||
Disabled bool `yaml:"disabled,omitempty"`
|
Disabled bool `yaml:"disabled,omitempty"`
|
||||||
} `yaml:"http2,omitempty"`
|
} `yaml:"http2,omitempty"`
|
||||||
H2C struct {
|
|
||||||
Enabled bool `yaml:"enabled,omitempty"`
|
|
||||||
} `yaml:"h2c,omitempty"`
|
|
||||||
}{
|
}{
|
||||||
TLS: struct {
|
TLS: struct {
|
||||||
Certificate string `yaml:"certificate,omitempty"`
|
Certificate string `yaml:"certificate,omitempty"`
|
||||||
|
@ -128,38 +121,25 @@ var configStruct = Configuration{
|
||||||
}{
|
}{
|
||||||
Disabled: false,
|
Disabled: false,
|
||||||
},
|
},
|
||||||
H2C: struct {
|
|
||||||
Enabled bool `yaml:"enabled,omitempty"`
|
|
||||||
}{
|
|
||||||
Enabled: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
Redis: Redis{
|
Redis: Redis{
|
||||||
Options: redis.UniversalOptions{
|
Addr: "localhost:6379",
|
||||||
Addrs: []string{"localhost:6379"},
|
|
||||||
Username: "alice",
|
Username: "alice",
|
||||||
Password: "123456",
|
Password: "123456",
|
||||||
DB: 1,
|
DB: 1,
|
||||||
MaxIdleConns: 16,
|
Pool: struct {
|
||||||
PoolSize: 64,
|
MaxIdle int `yaml:"maxidle,omitempty"`
|
||||||
ConnMaxIdleTime: time.Second * 300,
|
MaxActive int `yaml:"maxactive,omitempty"`
|
||||||
|
IdleTimeout time.Duration `yaml:"idletimeout,omitempty"`
|
||||||
|
}{
|
||||||
|
MaxIdle: 16,
|
||||||
|
MaxActive: 64,
|
||||||
|
IdleTimeout: time.Second * 300,
|
||||||
|
},
|
||||||
DialTimeout: time.Millisecond * 10,
|
DialTimeout: time.Millisecond * 10,
|
||||||
ReadTimeout: time.Millisecond * 10,
|
ReadTimeout: time.Millisecond * 10,
|
||||||
WriteTimeout: time.Millisecond * 10,
|
WriteTimeout: time.Millisecond * 10,
|
||||||
},
|
},
|
||||||
TLS: RedisTLSOptions{
|
|
||||||
Certificate: "/foo/cert.crt",
|
|
||||||
Key: "/foo/key.pem",
|
|
||||||
ClientCAs: []string{"/path/to/ca.pem"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Validation: Validation{
|
|
||||||
Manifests: ValidationManifests{
|
|
||||||
Indexes: ValidationIndexes{
|
|
||||||
Platforms: "none",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// configYamlV0_1 is a Version 0.1 yaml document representing configStruct
|
// configYamlV0_1 is a Version 0.1 yaml document representing configStruct
|
||||||
|
@ -179,8 +159,6 @@ storage:
|
||||||
int1: 42
|
int1: 42
|
||||||
url1: "https://foo.example.com"
|
url1: "https://foo.example.com"
|
||||||
path1: "/some-path"
|
path1: "/some-path"
|
||||||
tag:
|
|
||||||
concurrencylimit: 10
|
|
||||||
auth:
|
auth:
|
||||||
silly:
|
silly:
|
||||||
realm: silly
|
realm: silly
|
||||||
|
@ -199,31 +177,22 @@ notifications:
|
||||||
actions:
|
actions:
|
||||||
- pull
|
- pull
|
||||||
http:
|
http:
|
||||||
tls:
|
|
||||||
clientcas:
|
clientcas:
|
||||||
- /path/to/ca.pem
|
- /path/to/ca.pem
|
||||||
headers:
|
headers:
|
||||||
X-Content-Type-Options: [nosniff]
|
X-Content-Type-Options: [nosniff]
|
||||||
redis:
|
redis:
|
||||||
tls:
|
addr: localhost:6379
|
||||||
certificate: /foo/cert.crt
|
|
||||||
key: /foo/key.pem
|
|
||||||
clientcas:
|
|
||||||
- /path/to/ca.pem
|
|
||||||
addrs: [localhost:6379]
|
|
||||||
username: alice
|
username: alice
|
||||||
password: "123456"
|
password: 123456
|
||||||
db: 1
|
db: 1
|
||||||
maxidleconns: 16
|
pool:
|
||||||
poolsize: 64
|
maxidle: 16
|
||||||
connmaxidletime: 300s
|
maxactive: 64
|
||||||
|
idletimeout: 300s
|
||||||
dialtimeout: 10ms
|
dialtimeout: 10ms
|
||||||
readtimeout: 10ms
|
readtimeout: 10ms
|
||||||
writetimeout: 10ms
|
writetimeout: 10ms
|
||||||
validation:
|
|
||||||
manifests:
|
|
||||||
indexes:
|
|
||||||
platforms: none
|
|
||||||
`
|
`
|
||||||
|
|
||||||
// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
|
// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
|
||||||
|
@ -253,10 +222,6 @@ notifications:
|
||||||
http:
|
http:
|
||||||
headers:
|
headers:
|
||||||
X-Content-Type-Options: [nosniff]
|
X-Content-Type-Options: [nosniff]
|
||||||
validation:
|
|
||||||
manifests:
|
|
||||||
indexes:
|
|
||||||
platforms: none
|
|
||||||
`
|
`
|
||||||
|
|
||||||
type ConfigSuite struct {
|
type ConfigSuite struct {
|
||||||
|
@ -296,7 +261,6 @@ func (suite *ConfigSuite) TestParseSimple() {
|
||||||
func (suite *ConfigSuite) TestParseInmemory() {
|
func (suite *ConfigSuite) TestParseInmemory() {
|
||||||
suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
|
suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
|
||||||
suite.expectedConfig.Log.Fields = nil
|
suite.expectedConfig.Log.Fields = nil
|
||||||
suite.expectedConfig.HTTP.TLS.ClientCAs = nil
|
|
||||||
suite.expectedConfig.Redis = Redis{}
|
suite.expectedConfig.Redis = Redis{}
|
||||||
|
|
||||||
config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
|
config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
|
||||||
|
@ -317,9 +281,7 @@ func (suite *ConfigSuite) TestParseIncomplete() {
|
||||||
suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
|
suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
|
||||||
suite.expectedConfig.Notifications = Notifications{}
|
suite.expectedConfig.Notifications = Notifications{}
|
||||||
suite.expectedConfig.HTTP.Headers = nil
|
suite.expectedConfig.HTTP.Headers = nil
|
||||||
suite.expectedConfig.HTTP.TLS.ClientCAs = nil
|
|
||||||
suite.expectedConfig.Redis = Redis{}
|
suite.expectedConfig.Redis = Redis{}
|
||||||
suite.expectedConfig.Validation.Manifests.Indexes.Platforms = ""
|
|
||||||
|
|
||||||
// Note: this also tests that REGISTRY_STORAGE and
|
// Note: this also tests that REGISTRY_STORAGE and
|
||||||
// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together
|
// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together
|
||||||
|
@ -572,9 +534,6 @@ func copyConfig(config Configuration) *Configuration {
|
||||||
for k, v := range config.Storage.Parameters() {
|
for k, v := range config.Storage.Parameters() {
|
||||||
configCopy.Storage.setParameter(k, v)
|
configCopy.Storage.setParameter(k, v)
|
||||||
}
|
}
|
||||||
for k, v := range config.Storage.TagParameters() {
|
|
||||||
configCopy.Storage.setTagParameter(k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
|
configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
|
||||||
for k, v := range config.Auth.Parameters() {
|
for k, v := range config.Auth.Parameters() {
|
||||||
|
@ -588,20 +547,8 @@ func copyConfig(config Configuration) *Configuration {
|
||||||
for k, v := range config.HTTP.Headers {
|
for k, v := range config.HTTP.Headers {
|
||||||
configCopy.HTTP.Headers[k] = v
|
configCopy.HTTP.Headers[k] = v
|
||||||
}
|
}
|
||||||
configCopy.HTTP.TLS.ClientCAs = make([]string, 0, len(config.HTTP.TLS.ClientCAs))
|
|
||||||
configCopy.HTTP.TLS.ClientCAs = append(configCopy.HTTP.TLS.ClientCAs, config.HTTP.TLS.ClientCAs...)
|
|
||||||
|
|
||||||
configCopy.Redis = config.Redis
|
configCopy.Redis = config.Redis
|
||||||
configCopy.Redis.TLS.Certificate = config.Redis.TLS.Certificate
|
|
||||||
configCopy.Redis.TLS.Key = config.Redis.TLS.Key
|
|
||||||
configCopy.Redis.TLS.ClientCAs = make([]string, 0, len(config.Redis.TLS.ClientCAs))
|
|
||||||
configCopy.Redis.TLS.ClientCAs = append(configCopy.Redis.TLS.ClientCAs, config.Redis.TLS.ClientCAs...)
|
|
||||||
|
|
||||||
configCopy.Validation = Validation{
|
|
||||||
Enabled: config.Validation.Enabled,
|
|
||||||
Disabled: config.Validation.Disabled,
|
|
||||||
Manifests: config.Validation.Manifests,
|
|
||||||
}
|
|
||||||
|
|
||||||
return configCopy
|
return configCopy
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,7 +39,11 @@ target "update-vendor" {
|
||||||
target "mod-outdated" {
|
target "mod-outdated" {
|
||||||
dockerfile = "./dockerfiles/vendor.Dockerfile"
|
dockerfile = "./dockerfiles/vendor.Dockerfile"
|
||||||
target = "outdated"
|
target = "outdated"
|
||||||
no-cache-filter = ["outdated"]
|
args = {
|
||||||
|
// used to invalidate cache for outdated run stage
|
||||||
|
// can be dropped when https://github.com/moby/buildkit/issues/1213 fixed
|
||||||
|
_RANDOM = uuidv4()
|
||||||
|
}
|
||||||
output = ["type=cacheonly"]
|
output = ["type=cacheonly"]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -91,8 +95,15 @@ target "image-all" {
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "DOCS_BASEURL" {
|
||||||
|
default = null
|
||||||
|
}
|
||||||
|
|
||||||
target "_common_docs" {
|
target "_common_docs" {
|
||||||
dockerfile = "./dockerfiles/docs.Dockerfile"
|
dockerfile = "./dockerfiles/docs.Dockerfile"
|
||||||
|
args = {
|
||||||
|
DOCS_BASEURL = DOCS_BASEURL
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
target "docs-export" {
|
target "docs-export" {
|
||||||
|
@ -113,15 +124,3 @@ target "docs-test" {
|
||||||
target = "test"
|
target = "test"
|
||||||
output = ["type=cacheonly"]
|
output = ["type=cacheonly"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "authors" {
|
|
||||||
dockerfile = "./dockerfiles/authors.Dockerfile"
|
|
||||||
target = "update"
|
|
||||||
output = ["."]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "validate-authors" {
|
|
||||||
dockerfile = "./dockerfiles/authors.Dockerfile"
|
|
||||||
target = "validate"
|
|
||||||
output = ["type=cacheonly"]
|
|
||||||
}
|
|
||||||
|
|
|
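The new `DOCS_BASEURL` variable is wired into `_common_docs`, so it can be set when baking the docs targets. A sketch (the exact invocation is an assumption based on this bake file):

```bash
# Build the static docs with a non-root base URL
DOCS_BASEURL=/distribution/ docker buildx bake docs-export
```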
@ -1,34 +0,0 @@
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
ARG ALPINE_VERSION=3.20
|
|
||||||
|
|
||||||
FROM alpine:${ALPINE_VERSION} AS gen
|
|
||||||
RUN apk add --no-cache git
|
|
||||||
WORKDIR /src
|
|
||||||
RUN --mount=type=bind,target=. <<EOT
|
|
||||||
set -e
|
|
||||||
mkdir /out
|
|
||||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
|
||||||
{
|
|
||||||
echo "# This file lists all individuals having contributed content to the repository."
|
|
||||||
echo "# For how it is generated, see dockerfiles/authors.Dockerfile."
|
|
||||||
echo
|
|
||||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
|
||||||
} > /out/AUTHORS
|
|
||||||
cat /out/AUTHORS
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM scratch AS update
|
|
||||||
COPY --from=gen /out /
|
|
||||||
|
|
||||||
FROM gen AS validate
|
|
||||||
RUN --mount=type=bind,target=.,rw <<EOT
|
|
||||||
set -e
|
|
||||||
git add -A
|
|
||||||
cp -rf /out/* .
|
|
||||||
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
|
|
||||||
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
|
|
||||||
git status --porcelain -- AUTHORS
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
EOT
|
|
|
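On the main side this Dockerfile is driven by the `authors` and `validate-authors` targets that appear in the Makefile and bake-file hunks above, so regenerating or checking the AUTHORS file is a one-liner; a sketch:

```bash
# Regenerate AUTHORS from git history (writes the file into the working tree)
make authors

# Fail if the checked-in AUTHORS file is out of date
make validate-authors
```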
@ -1,7 +1,7 @@
|
||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.22.5
|
ARG GO_VERSION=1.21.5
|
||||||
ARG ALPINE_VERSION=3.20
|
ARG ALPINE_VERSION=3.18
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
|
FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
|
||||||
RUN apk add --no-cache git
|
RUN apk add --no-cache git
|
||||||
|
@ -16,8 +16,9 @@ COPY --from=hugo $GOPATH/bin/hugo /bin/hugo
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
|
|
||||||
FROM build-base AS build
|
FROM build-base AS build
|
||||||
|
ARG DOCS_BASEURL=/
|
||||||
RUN --mount=type=bind,rw,source=docs,target=. \
|
RUN --mount=type=bind,rw,source=docs,target=. \
|
||||||
hugo --gc --minify --destination /out
|
hugo --gc --minify --destination /out -b $DOCS_BASEURL
|
||||||
|
|
||||||
FROM build-base AS server
|
FROM build-base AS server
|
||||||
COPY docs .
|
COPY docs .
|
||||||
|
@ -28,12 +29,8 @@ FROM scratch AS out
|
||||||
COPY --from=build /out /
|
COPY --from=build /out /
|
||||||
|
|
||||||
FROM wjdp/htmltest:v0.17.0 AS test
|
FROM wjdp/htmltest:v0.17.0 AS test
|
||||||
# Copy the site to a public/distribution subdirectory
|
|
||||||
# This is a workaround for a limitation in htmltest, see:
|
|
||||||
# https://github.com/wjdp/htmltest/issues/45
|
|
||||||
WORKDIR /test/public/distribution
|
|
||||||
COPY --from=build /out .
|
|
||||||
WORKDIR /test
|
WORKDIR /test
|
||||||
|
COPY --from=build /out ./public
|
||||||
ADD docs/.htmltest.yml .htmltest.yml
|
ADD docs/.htmltest.yml .htmltest.yml
|
||||||
RUN --mount=type=cache,target=tmp/.htmltest \
|
RUN --mount=type=cache,target=tmp/.htmltest \
|
||||||
htmltest
|
htmltest
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.22.5
|
ARG GO_VERSION=1.20.12
|
||||||
ARG ALPINE_VERSION=3.20
|
ARG ALPINE_VERSION=3.18
|
||||||
|
|
||||||
FROM alpine:${ALPINE_VERSION} AS base
|
FROM alpine:${ALPINE_VERSION} AS base
|
||||||
RUN apk add --no-cache git gpg
|
RUN apk add --no-cache git gpg
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.22.5
|
ARG GO_VERSION=1.20.12
|
||||||
ARG ALPINE_VERSION=3.20
|
ARG ALPINE_VERSION=3.18
|
||||||
ARG GOLANGCI_LINT_VERSION=v1.59.1
|
ARG GOLANGCI_LINT_VERSION=v1.55.2
|
||||||
ARG BUILDTAGS=""
|
ARG BUILDTAGS=""
|
||||||
|
|
||||||
FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint
|
FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG GO_VERSION=1.22.5
|
ARG GO_VERSION=1.20.12
|
||||||
ARG ALPINE_VERSION=3.20
|
ARG ALPINE_VERSION=3.18
|
||||||
ARG MODOUTDATED_VERSION=v0.8.0
|
ARG MODOUTDATED_VERSION=v0.8.0
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
|
FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
|
||||||
|
@ -40,6 +40,7 @@ EOT
|
||||||
|
|
||||||
FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
|
FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
|
||||||
FROM base AS outdated
|
FROM base AS outdated
|
||||||
|
ARG _RANDOM
|
||||||
RUN --mount=target=.,ro \
|
RUN --mount=target=.,ro \
|
||||||
--mount=target=/go/pkg/mod,type=cache \
|
--mount=target=/go/pkg/mod,type=cache \
|
||||||
--mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
|
--mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
|
||||||
|
|
|
@ -50,7 +50,7 @@ specify it in the `docker run` command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker run -d -p 5000:5000 --restart=always --name registry \
|
$ docker run -d -p 5000:5000 --restart=always --name registry \
|
||||||
-v `pwd`/config.yml:/etc/distribution/config.yml \
|
-v `pwd`/config.yml:/etc/docker/registry/config.yml \
|
||||||
registry:2
|
registry:2
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -141,8 +141,6 @@ storage:
|
||||||
usedualstack: false
|
usedualstack: false
|
||||||
loglevel: debug
|
loglevel: debug
|
||||||
inmemory: # This driver takes no parameters
|
inmemory: # This driver takes no parameters
|
||||||
tag:
|
|
||||||
concurrencylimit: 8
|
|
||||||
delete:
|
delete:
|
||||||
enabled: false
|
enabled: false
|
||||||
redirect:
|
redirect:
|
||||||
|
@ -168,10 +166,6 @@ auth:
|
||||||
service: token-service
|
service: token-service
|
||||||
issuer: registry-token-issuer
|
issuer: registry-token-issuer
|
||||||
rootcertbundle: /root/certs/bundle
|
rootcertbundle: /root/certs/bundle
|
||||||
jwks: /path/to/jwks
|
|
||||||
signingalgorithms:
|
|
||||||
- EdDSA
|
|
||||||
- HS256
|
|
||||||
htpasswd:
|
htpasswd:
|
||||||
realm: basic-realm
|
realm: basic-realm
|
||||||
path: /path/to/htpasswd
|
path: /path/to/htpasswd
|
||||||
|
@ -226,8 +220,6 @@ http:
|
||||||
X-Content-Type-Options: [nosniff]
|
X-Content-Type-Options: [nosniff]
|
||||||
http2:
|
http2:
|
||||||
disabled: false
|
disabled: false
|
||||||
h2c:
|
|
||||||
enabled: false
|
|
||||||
notifications:
|
notifications:
|
||||||
events:
|
events:
|
||||||
includereferences: true
|
includereferences: true
|
||||||
|
@ -247,20 +239,16 @@ notifications:
|
||||||
actions:
|
actions:
|
||||||
- pull
|
- pull
|
||||||
redis:
|
redis:
|
||||||
tls:
|
addr: localhost:6379
|
||||||
certificate: /path/to/cert.crt
|
|
||||||
key: /path/to/key.pem
|
|
||||||
clientcas:
|
|
||||||
- /path/to/ca.pem
|
|
||||||
addrs: [localhost:6379]
|
|
||||||
password: asecret
|
password: asecret
|
||||||
db: 0
|
db: 0
|
||||||
dialtimeout: 10ms
|
dialtimeout: 10ms
|
||||||
readtimeout: 10ms
|
readtimeout: 10ms
|
||||||
writetimeout: 10ms
|
writetimeout: 10ms
|
||||||
maxidleconns: 16
|
pool:
|
||||||
poolsize: 64
|
maxidle: 16
|
||||||
connmaxidletime: 300s
|
maxactive: 64
|
||||||
|
idletimeout: 300s
|
||||||
tls:
|
tls:
|
||||||
enabled: false
|
enabled: false
|
||||||
health:
|
health:
|
||||||
|
@ -296,11 +284,6 @@ validation:
|
||||||
- ^https?://([^/]+\.)*example\.com/
|
- ^https?://([^/]+\.)*example\.com/
|
||||||
deny:
|
deny:
|
||||||
- ^https?://www\.example\.com/
|
- ^https?://www\.example\.com/
|
||||||
indexes:
|
|
||||||
platforms: List
|
|
||||||
platformlist:
|
|
||||||
- architecture: amd64
|
|
||||||
os: linux
|
|
||||||
```
|
```
|
||||||
|
|
||||||
In some instances a configuration option is **optional** but it contains child
|
In some instances a configuration option is **optional** but it contains child
|
||||||
|
@@ -452,16 +435,16 @@ use. You must configure exactly one backend. If you configure more, the registry
 returns an error. You can choose any of these backend storage drivers:

 | Storage driver | Description |
-| -------------- | ------------ |
-| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](../storage-drivers/filesystem.md). |
-| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](../storage-drivers/azure.md). |
-| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](../storage-drivers/gcs.md). |
-| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](../storage-drivers/s3.md). |
+|----------------|--------------|
+| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the [driver's reference documentation](/storage-drivers/filesystem). |
+| `azure` | Uses Microsoft Azure Blob Storage. See the [driver's reference documentation](/storage-drivers/azure). |
+| `gcs` | Uses Google Cloud Storage. See the [driver's reference documentation](/storage-drivers/gcs). |
+| `s3` | Uses Amazon Simple Storage Service (S3) and compatible Storage Services. See the [driver's reference documentation](/storage-drivers/s3). |

 For testing only, you can use the [`inmemory` storage
-driver](../storage-drivers/inmemory.md).
+driver](/storage-drivers/inmemory).
 If you would like to run a registry from volatile memory, use the
-[`filesystem` driver](../storage-drivers/filesystem.md)
+[`filesystem` driver](/storage-drivers/filesystem)
 on a ramdisk.

 If you are deploying a registry on Windows, a Windows volume mounted from the
@@ -536,26 +519,6 @@ parameter sets a limit on the number of descriptors to store in the cache.
 The default value is 10000. If this parameter is set to 0, the cache is allowed
 to grow with no size limit.

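For illustration, a minimal sketch of a Redis-backed blob descriptor cache using the options described above might look like this; the size value and root directory are only examples:

```yaml
storage:
  filesystem:
    rootdirectory: /var/lib/registry
  cache:
    # Store blob metadata in Redis instead of the default in-memory cache.
    blobdescriptor: redis
    # Limit the cache to 10000 descriptors; 0 would let it grow without bound.
    blobdescriptorsize: 10000
```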
-### `tag`
-
-The `tag` subsection provides configuration to set a concurrency limit for tag lookup.
-When a user calls into the registry to delete a manifest, the registry in turn does a
-lookup for all tags that reference the deleted manifest. To find the tag references,
-the registry iterates every tag in the repository and reads its link file to check
-whether it matches the deleted manifest (i.e. whether it uses the same sha256 digest).
-So, the more tags in the repository, the worse the performance will be (as there will
-be more S3 API calls occurring for the tag directory lookups and tag file reads if
-using the S3 storage driver).
-
-Therefore, set the `concurrencylimit` flag under the `tag` section to optimize tag
-lookup performance. When a value is not provided or is equal to 0,
-`GOMAXPROCS` will be used.
-
-```yaml
-tag:
-  concurrencylimit: 8
-```
-
 ### `redirect`

 The `redirect` subsection provides configuration for managing redirects from
@@ -585,11 +548,6 @@ auth:
     service: token-service
     issuer: registry-token-issuer
     rootcertbundle: /root/certs/bundle
-    jwks: /path/to/jwks
-    signingalgorithms:
-      - EdDSA
-      - HS256
-      - ES512
   htpasswd:
     realm: basic-realm
     path: /path/to/htpasswd
@@ -626,48 +584,16 @@ the registry. It is an established authentication paradigm with a high degree of
 security.

 | Parameter | Required | Description |
-|----------------------|----------|-------------------------------------------------------|
+|-----------|----------|-------------------------------------------------------|
 | `realm` | yes | The realm in which the registry server authenticates. |
 | `service` | yes | The service being authenticated. |
 | `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. |
 | `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |
-| `autoredirect` | no | When set to `true`, `realm` will be set to the Host header of the request as the domain and a path of `/auth/token/` (or the path specified by `autoredirectpath`); the `realm` URL scheme uses the `X-Forwarded-Proto` header if set, otherwise it is set to `https`. |
-| `autoredirectpath` | no | The path to redirect to if `autoredirect` is set to `true`, default: `/auth/token/`. |
-| `signingalgorithms` | no | A list of token signing algorithms to use for verifying token signatures. If left empty the default list of signing algorithms is used. Please see below for allowed values and the default. |
-| `jwks` | no | The absolute path to the JSON Web Key Set (JWKS) file. The JWKS file contains the trusted keys used to verify the signature of authentication tokens. |
+| `autoredirect` | no | When set to `true`, `realm` will automatically be set using the Host header of the request as the domain and a path of `/auth/token/`. |
-
-Available `signingalgorithms`:
-- EdDSA
-- HS256
-- HS384
-- HS512
-- RS256
-- RS384
-- RS512
-- ES256
-- ES384
-- ES512
-- PS256
-- PS384
-- PS512
-
-Default `signingalgorithms`:
-- EdDSA
-- HS256
-- HS384
-- HS512
-- RS256
-- RS384
-- RS512
-- ES256
-- ES384
-- ES512
-- PS256
-- PS384
-- PS512

 For more information about Token based authentication configuration, see the
-[specification](../spec/auth/token.md).
+[specification](/spec/auth/token).
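To tie these parameters together, a minimal sketch of a token `auth` block might look like this; the URLs and file paths are placeholders, and the algorithm list merely narrows the default set:

```yaml
auth:
  token:
    realm: https://auth.example.com/token   # placeholder token server URL
    service: container_registry
    issuer: registry-token-issuer
    # Public certificates used to verify token signatures.
    rootcertbundle: /etc/registry/certs/bundle.pem
    # Optionally trust keys from a JWKS file as well.
    jwks: /etc/registry/certs/jwks.json
    # Restrict accepted signing algorithms to a subset of the defaults.
    signingalgorithms:
      - EdDSA
      - ES256
```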

 ### `htpasswd`

@@ -798,8 +724,6 @@ http:
     X-Content-Type-Options: [nosniff]
   http2:
     disabled: false
-  h2c:
-    enabled: false
 ```

 The `http` option details the configuration for the HTTP server that hosts the
@@ -946,24 +870,13 @@ registry. This header is included in the example configuration file.

 ### `http2`

-The `http2` structure within `http` is **optional**. Use this to control HTTP/2 over TLS
+The `http2` structure within `http` is **optional**. Use this to control http2
 settings for the registry.
-If `tls` is not configured this option is ignored. To enable HTTP/2 over non-TLS connections use `h2c` instead.

 | Parameter | Required | Description |
 |-----------|----------|-------------------------------------------------------|
 | `disabled` | no | If `true`, then `http2` support is disabled. |

-### `h2c`
-
-The `h2c` structure within `http` is **optional**. Use this to control H2C (HTTP/2 Cleartext)
-settings for the registry.
-This is useful when deploying the registry behind a load balancer (e.g. Google Cloud Run).
-
-| Parameter | Required | Description |
-|-----------|----------|-------------------------------------------------------|
-| `enabled` | no | If `true`, then `h2c` support is enabled. |
-
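For example, a registry running behind a TLS-terminating load balancer might enable H2C as sketched below; the listen address is illustrative:

```yaml
http:
  addr: :5000
  # No TLS configured here, so HTTP/2 over TLS does not apply; serve HTTP/2 Cleartext instead.
  h2c:
    enabled: true
```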
 ## `notifications`

 ```yaml
@@ -1024,46 +937,72 @@ The `events` structure configures the information provided in event notifications.

 ## `redis`

-Declare parameters for constructing the `redis` connections. Registry instances
-may use the Redis instance for several applications. Currently, it caches
-information about immutable blobs. Most of the `redis` options control
-how the registry connects to the `redis` instance.
-
-You should configure Redis with the **allkeys-lru** eviction policy, because the
-registry does not set an expiration value on keys.
-
-Under the hood distribution uses the [`go-redis`](https://github.com/redis/go-redis) Go module for
-Redis connectivity and its [`UniversalOptions`](https://pkg.go.dev/github.com/redis/go-redis/v9#UniversalOptions)
-struct.
-
-You can optionally specify TLS configuration on top of the `UniversalOptions` settings.
-
-Use these settings to configure Redis TLS:
-
-| Parameter | Required | Description |
-|-----------|----------|-------------------------------------------------------|
-| `certificate` | yes | Absolute path to the x509 certificate file. |
-| `key` | yes | Absolute path to the x509 private key file. |
-| `clientcas` | no | An array of absolute paths to x509 CA files. |
-
 ```yaml
 redis:
-  tls:
-    certificate: /path/to/cert.crt
-    key: /path/to/key.pem
-    clientcas:
-      - /path/to/ca.pem
-  addrs: [localhost:6379]
+  addr: localhost:6379
   password: asecret
   db: 0
   dialtimeout: 10ms
   readtimeout: 10ms
   writetimeout: 10ms
-  maxidleconns: 16
-  poolsize: 64
-  connmaxidletime: 300s
+  pool:
+    maxidle: 16
+    maxactive: 64
+    idletimeout: 300s
   tls:
     enabled: false
 ```

+Declare parameters for constructing the `redis` connections. Registry instances
+may use the Redis instance for several applications. Currently, it caches
+information about immutable blobs. Most of the `redis` options control
+how the registry connects to the `redis` instance. You can control the pool's
+behavior with the [pool](#pool) subsection. Additionally, you can control
+TLS connection settings with the [tls](#tls) subsection (in-transit encryption).
+
+You should configure Redis with the **allkeys-lru** eviction policy, because the
+registry does not set an expiration value on keys.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `addr` | yes | The address (host and port) of the Redis instance. |
+| `password`| no | A password used to authenticate to the Redis instance.|
+| `db` | no | The name of the database to use for each connection. |
+| `dialtimeout` | no | The timeout for connecting to the Redis instance. |
+| `readtimeout` | no | The timeout for reading from the Redis instance. |
+| `writetimeout` | no | The timeout for writing to the Redis instance. |
+
+### `pool`
+
+```yaml
+pool:
+  maxidle: 16
+  maxactive: 64
+  idletimeout: 300s
+```
+
+Use these settings to configure the behavior of the Redis connection pool.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `maxidle` | no | The maximum number of idle connections in the pool. |
+| `maxactive`| no | The maximum number of connections which can be open before blocking a connection request. |
+| `idletimeout`| no | How long to wait before closing inactive connections. |
+
+### `tls`
+
+```yaml
+tls:
+  enabled: false
+```
+
+Use these settings to configure Redis TLS.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------- |
+| `enabled` | no | Whether or not to use TLS in-transit. |
+
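Bringing the `UniversalOptions`-style settings together, a sketch of a TLS-enabled, multi-address Redis configuration might look like the following; the hostnames and file paths are placeholders, and whether multiple addresses make sense depends on your Redis topology (standalone, Sentinel, or Cluster):

```yaml
redis:
  # Multiple endpoints are accepted because the options map onto go-redis UniversalOptions.
  addrs: [redis-0.example.internal:6379, redis-1.example.internal:6379]
  password: asecret
  db: 0
  dialtimeout: 10ms
  readtimeout: 10ms
  writetimeout: 10ms
  maxidleconns: 16
  poolsize: 64
  connmaxidletime: 300s
  tls:
    certificate: /etc/registry/redis/client.crt
    key: /etc/registry/redis/client.key
    clientcas:
      - /etc/registry/redis/ca.pem
```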
 ## `health`

 ```yaml
@ -1161,7 +1100,7 @@ proxy:
|
||||||
|
|
||||||
The `proxy` structure allows a registry to be configured as a pull-through cache
|
The `proxy` structure allows a registry to be configured as a pull-through cache
|
||||||
to Docker Hub. See
|
to Docker Hub. See
|
||||||
[mirror](../recipes/mirror.md)
|
[mirror](/recipes/mirror)
|
||||||
for more information. Pushing to a registry configured as a pull-through cache
|
for more information. Pushing to a registry configured as a pull-through cache
|
||||||
is unsupported.
|
is unsupported.
|
||||||
|
|
||||||
|
@@ -1183,14 +1122,14 @@ username (such as `batman`) and the password for that username.

 ```yaml
 validation:
-  disabled: false
+  manifests:
+    urls:
+      allow:
+        - ^https?://([^/]+\.)*example\.com/
+      deny:
+        - ^https?://www\.example\.com/
 ```

-Use these settings to configure what validation the registry performs on content.
-
-Validation is performed when content is uploaded to the registry. Changing these
-settings will not validate content that has already been accepted into the registry.
-
 ### `disabled`

 The `disabled` flag disables the other options in the `validation`
@@ -1203,16 +1142,6 @@ Use the `manifests` subsection to configure validation of manifests. If

 #### `urls`

-```yaml
-validation:
-  manifests:
-    urls:
-      allow:
-        - ^https?://([^/]+\.)*example\.com/
-      deny:
-        - ^https?://www\.example\.com/
-```
-
 The `allow` and `deny` options are each a list of
 [regular expressions](https://pkg.go.dev/regexp/syntax) that restrict the URLs in
 pushed manifests.
@@ -1226,54 +1155,6 @@ one of the `allow` regular expressions **and** one of the following holds:
 2. `deny` is set but no URLs within the manifest match any of the `deny` regular
    expressions.

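As a quick worked example of these rules, the sketch below uses the same illustrative patterns as above: a manifest URL such as `https://cdn.example.com/layer.tar.gz` matches `allow` and not `deny`, so it is accepted, while `https://www.example.com/layer.tar.gz` matches `deny` and is rejected.

```yaml
validation:
  manifests:
    urls:
      allow:
        - ^https?://([^/]+\.)*example\.com/   # e.g. https://cdn.example.com/... is accepted
      deny:
        - ^https?://www\.example\.com/        # anything under www.example.com is rejected
```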
-#### `indexes`
-
-By default the registry will validate that all platform images exist when an image
-index is uploaded to the registry. Disabling this validation is experimental
-because other tooling that uses the registry may expect the image index to be complete.
-
-    validation:
-      manifests:
-        indexes:
-          platforms: [all|none|list]
-          platformlist:
-            - os: linux
-              architecture: amd64
-
-Use these settings to configure what validation the registry performs on image
-index manifests uploaded to the registry.
-
-##### `platforms`
-
-Set `platforms` to `all` (the default) to validate that all platform images exist.
-The registry will validate that the images referenced by the index exist in the
-registry before accepting the image index.
-
-Set `platforms` to `none` to disable all validation that images exist when an
-image index manifest is uploaded. This allows image lists to be uploaded to the
-registry without their associated images. This setting is experimental because
-other tooling that uses the registry may expect the image index to be complete.
-
-Set `platforms` to `list` to selectively validate the existence of platforms
-within image index manifests. This setting is experimental because other tooling
-that uses the registry may expect the image index to be complete.
-
-##### `platformlist`
-
-When `platforms` is set to `list`, set `platformlist` to an array of
-platforms to validate. If a platform is included in this array and in the images
-contained within an index, the registry will validate that the platform-specific image
-exists in the registry before accepting the index. The registry will not validate the
-existence of platform-specific images in the index that do not appear in the
-`platformlist` array.
-
-This parameter does not validate that the configured platforms are included in every
-index. If an image index does not include one of the platform-specific images configured
-in the `platformlist` array, it may still be accepted by the registry.
-
-Each platform is a map with two keys, `os` and `architecture`, as defined in the
-[OCI Image Index specification](https://github.com/opencontainers/image-spec/blob/main/image-index.md#image-index-property-descriptions).
-
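To make the `list` mode concrete, a sketch of a configuration that only insists on the presence of Linux amd64 and arm64 images in uploaded indexes might look like this; the arm64 entry is purely illustrative:

```yaml
validation:
  manifests:
    indexes:
      platforms: list
      platformlist:
        # Only these platforms must already exist in the registry for an index to be accepted.
        - os: linux
          architecture: amd64
        - os: linux
          architecture: arm64
```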
 ## Example: Development configuration

 You can use this simple example for local development:

@ -9,7 +9,7 @@ A registry is an instance of the `registry` image, and runs within Docker.
|
||||||
|
|
||||||
This topic provides basic information about deploying and configuring a
|
This topic provides basic information about deploying and configuring a
|
||||||
registry. For an exhaustive list of configuration options, see the
|
registry. For an exhaustive list of configuration options, see the
|
||||||
[configuration reference](configuration.md).
|
[configuration reference](../configuration).
|
||||||
|
|
||||||
If you have an air-gapped datacenter, see
|
If you have an air-gapped datacenter, see
|
||||||
[Considerations for air-gapped registries](#considerations-for-air-gapped-registries).
|
[Considerations for air-gapped registries](#considerations-for-air-gapped-registries).
|
||||||
|
@ -27,7 +27,7 @@ The registry is now ready to use.
|
||||||
> **Warning**: These first few examples show registry configurations that are
|
> **Warning**: These first few examples show registry configurations that are
|
||||||
> only appropriate for testing. A production-ready registry must be protected by
|
> only appropriate for testing. A production-ready registry must be protected by
|
||||||
> TLS and should ideally use an access-control mechanism. Keep reading and then
|
> TLS and should ideally use an access-control mechanism. Keep reading and then
|
||||||
> continue to the [configuration guide](configuration.md) to deploy a
|
> continue to the [configuration guide](../configuration) to deploy a
|
||||||
> production-ready registry.
|
> production-ready registry.
|
||||||
|
|
||||||
## Copy an image from Docker Hub to your registry
|
## Copy an image from Docker Hub to your registry
|
||||||
|
@ -94,7 +94,7 @@ To configure the container, you can pass additional or modified options to the
|
||||||
`docker run` command.
|
`docker run` command.
|
||||||
|
|
||||||
The following sections provide basic guidelines for configuring your registry.
|
The following sections provide basic guidelines for configuring your registry.
|
||||||
For more details, see the [registry configuration reference](configuration.md).
|
For more details, see the [registry configuration reference](../configuration).
|
||||||
|
|
||||||
### Start the registry automatically
|
### Start the registry automatically
|
||||||
|
|
||||||
|
@ -166,8 +166,8 @@ $ docker run -d \
|
||||||
By default, the registry stores its data on the local filesystem, whether you
|
By default, the registry stores its data on the local filesystem, whether you
|
||||||
use a bind mount or a volume. You can store the registry data in an Amazon S3
|
use a bind mount or a volume. You can store the registry data in an Amazon S3
|
||||||
bucket, Google Cloud Platform, or on another storage back-end by using
|
bucket, Google Cloud Platform, or on another storage back-end by using
|
||||||
[storage drivers](../storage-drivers/_index.md). For more information, see
|
[storage drivers](/storage-drivers). For more information, see
|
||||||
[storage configuration options](configuration.md#storage).
|
[storage configuration options](../configuration#storage).
|
||||||
|
|
||||||
## Run an externally-accessible registry
|
## Run an externally-accessible registry
|
||||||
|
|
||||||
|
@ -252,13 +252,13 @@ The registry supports using Let's Encrypt to automatically obtain a
|
||||||
browser-trusted certificate. For more information on Let's Encrypt, see
|
browser-trusted certificate. For more information on Let's Encrypt, see
|
||||||
[https://letsencrypt.org/how-it-works/](https://letsencrypt.org/how-it-works/)
|
[https://letsencrypt.org/how-it-works/](https://letsencrypt.org/how-it-works/)
|
||||||
and the relevant section of the
|
and the relevant section of the
|
||||||
[registry configuration](configuration.md#letsencrypt).
|
[registry configuration](../configuration#letsencrypt).
|
||||||
|
|
||||||
### Use an insecure registry (testing only)
|
### Use an insecure registry (testing only)
|
||||||
|
|
||||||
It is possible to use a self-signed certificate, or to use our registry
|
It is possible to use a self-signed certificate, or to use our registry
|
||||||
insecurely. Unless you have set up verification for your self-signed
|
insecurely. Unless you have set up verification for your self-signed
|
||||||
certificate, this is for testing only. See [run an insecure registry](insecure.md).
|
certificate, this is for testing only. See [run an insecure registry](../insecure).
|
||||||
|
|
||||||
## Run the registry as a service
|
## Run the registry as a service
|
||||||
|
|
||||||
|
@ -462,20 +462,20 @@ using htpasswd, all authentication attempts will fail.
|
||||||
{{< hint type=note title="X509 errors" >}}
|
{{< hint type=note title="X509 errors" >}}
|
||||||
X509 errors usually indicate that you are attempting to use
|
X509 errors usually indicate that you are attempting to use
|
||||||
a self-signed certificate without configuring the Docker daemon correctly.
|
a self-signed certificate without configuring the Docker daemon correctly.
|
||||||
See [run an insecure registry](insecure.md).
|
See [run an insecure registry](../insecure).
|
||||||
{{< /hint >}}
|
{{< /hint >}}
|
||||||
|
|
||||||
### More advanced authentication
|
### More advanced authentication
|
||||||
|
|
||||||
You may want to leverage more advanced basic auth implementations by using a
|
You may want to leverage more advanced basic auth implementations by using a
|
||||||
proxy in front of the registry. See the [recipes list](../recipes/_index.md).
|
proxy in front of the registry. See the [recipes list](/recipes/).
|
||||||
|
|
||||||
The registry also supports delegated authentication which redirects users to a
|
The registry also supports delegated authentication which redirects users to a
|
||||||
specific trusted token server. This approach is more complicated to set up, and
|
specific trusted token server. This approach is more complicated to set up, and
|
||||||
only makes sense if you need to fully configure ACLs and need more control over
|
only makes sense if you need to fully configure ACLs and need more control over
|
||||||
the registry's integration into your global authorization and authentication
|
the registry's integration into your global authorization and authentication
|
||||||
systems. Refer to the following [background information](../spec/auth/token.md) and
|
systems. Refer to the following [background information](/spec/auth/token) and
|
||||||
[configuration information here](configuration.md#auth).
|
[configuration information here](../configuration#auth).
|
||||||
|
|
||||||
This approach requires you to implement your own authentication system or
|
This approach requires you to implement your own authentication system or
|
||||||
leverage a third-party implementation.
|
leverage a third-party implementation.
|
||||||
|
@ -572,9 +572,9 @@ artifacts.
|
||||||
|
|
||||||
More specific and advanced information is available in the following sections:
|
More specific and advanced information is available in the following sections:
|
||||||
|
|
||||||
- [Configuration reference](configuration.md)
|
- [Configuration reference](../configuration)
|
||||||
- [Working with notifications](notifications.md)
|
- [Working with notifications](../notifications)
|
||||||
- [Advanced "recipes"](../recipes/_index.md)
|
- [Advanced "recipes"](/recipes)
|
||||||
- [Registry API](../spec/api.md)
|
- [Registry API](/spec/api)
|
||||||
- [Storage driver model](../storage-drivers/_index.md)
|
- [Storage driver model](/storage-drivers)
|
||||||
- [Token authentication](../spec/auth/token.md)
|
- [Token authentication](/spec/auth/token)
|
||||||
|
|
|
@ -21,15 +21,15 @@ that certain layers no longer exist on the filesystem.
|
||||||
|
|
||||||
Filesystem layers are stored by their content address in the Registry. This
|
Filesystem layers are stored by their content address in the Registry. This
|
||||||
has many advantages, one of which is that data is stored once and referred to by manifests.
|
has many advantages, one of which is that data is stored once and referred to by manifests.
|
||||||
See [here](compatibility.md#content-addressable-storage-cas) for more details.
|
See [here](../compatibility#content-addressable-storage-cas) for more details.
|
||||||
|
|
||||||
Layers are therefore shared amongst manifests; each manifest maintains a reference
|
Layers are therefore shared amongst manifests; each manifest maintains a reference
|
||||||
to the layer. As long as a layer is referenced by one manifest, it cannot be garbage
|
to the layer. As long as a layer is referenced by one manifest, it cannot be garbage
|
||||||
collected.
|
collected.
|
||||||
|
|
||||||
Manifests and layers can be `deleted` with the registry API (refer to the API
|
Manifests and layers can be `deleted` with the registry API (refer to the API
|
||||||
documentation [here](../spec/api.md#deleting-a-layer) and
|
documentation [here](/spec/api#deleting-a-layer) and
|
||||||
[here](../spec/api.md#deleting-an-image) for details). This API removes references
|
[here](/spec/api#deleting-an-image) for details). This API removes references
|
||||||
to the target and makes them eligible for garbage collection. It also makes them
|
to the target and makes them eligible for garbage collection. It also makes them
|
||||||
unable to be read via the API.
|
unable to be read via the API.
|
||||||
|
|
||||||
|
|
|
@ -72,7 +72,7 @@ This is more secure than the insecure registry solution.
|
||||||
|
|
||||||
Be sure to use the name `myregistry.domain.com` as a CN.
|
Be sure to use the name `myregistry.domain.com` as a CN.
|
||||||
|
|
||||||
2. Use the result to [start your registry with TLS enabled](deploying.md#get-a-certificate).
|
2. Use the result to [start your registry with TLS enabled](../deploying#get-a-certificate).
|
||||||
|
|
||||||
3. Instruct every Docker daemon to trust that certificate. The way to do this
|
3. Instruct every Docker daemon to trust that certificate. The way to do this
|
||||||
depends on your OS.
|
depends on your OS.
|
||||||
|
|
|
@ -10,7 +10,7 @@ pushes and pulls and layer pushes and pulls. These actions are serialized into
|
||||||
events. The events are queued into a registry-internal broadcast system which
|
events. The events are queued into a registry-internal broadcast system which
|
||||||
queues and dispatches events to [_Endpoints_](#endpoints).
|
queues and dispatches events to [_Endpoints_](#endpoints).
|
||||||
|
|
||||||
![Workflow of registry notifications](/distribution/images/notifications.png)
|
![Workflow of registry notifications](../../images/notifications.png)
|
||||||
|
|
||||||
## Endpoints
|
## Endpoints
|
||||||
|
|
||||||
|
@ -24,7 +24,7 @@ order is not guaranteed.
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
To set up a registry instance to send notifications to endpoints, one must add
|
To setup a registry instance to send notifications to endpoints, one must add
|
||||||
them to the configuration. A simple example follows:
|
them to the configuration. A simple example follows:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
@ -45,7 +45,7 @@ The above would configure the registry with an endpoint to send events to
|
||||||
5 failures happen consecutively, the registry backs off for 1 second before
|
5 failures happen consecutively, the registry backs off for 1 second before
|
||||||
trying again.
|
trying again.
|
||||||
|
|
||||||
For details on the fields, see the [configuration documentation](configuration.md#notifications).
|
For details on the fields, see the [configuration documentation](../configuration/#notifications).
|
||||||
|
|
||||||
A properly configured endpoint should lead to a log message from the registry
|
A properly configured endpoint should lead to a log message from the registry
|
||||||
upon startup:
|
upon startup:
|
||||||
|
|
|
@ -12,7 +12,7 @@ Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO
|
||||||
|
|
||||||
### Alternatives
|
### Alternatives
|
||||||
|
|
||||||
If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](../about/deploying.md#native-basic-auth).
|
If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](/about/deploying#native-basic-auth).
|
||||||
|
|
||||||
### Solution
|
### Solution
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ The following table shows examples of allowed and disallowed mirror URLs.
|
||||||
|
|
||||||
> **Note**
|
> **Note**
|
||||||
>
|
>
|
||||||
> Mirrors of Docker Hub are still subject to Docker's [fair usage policy](https://www.docker.com/pricing/resource-consumption-updates).
|
> Mirrors of Docker Hub are still subject to Docker's [fair usage policy](https://www.docker.com/pricing/resource-consumption-updates){: target="blank" rel="noopener" class=“”}.
|
||||||
|
|
||||||
### Solution
|
### Solution
|
||||||
|
|
||||||
|
@ -72,7 +72,7 @@ be configured to use the `filesystem` driver for storage.
|
||||||
|
|
||||||
The easiest way to run a registry as a pull through cache is to run the official
|
The easiest way to run a registry as a pull through cache is to run the official
|
||||||
Registry image.
|
Registry image.
|
||||||
At least, you need to specify `proxy.remoteurl` within `/etc/distribution/config.yml`
|
At least, you need to specify `proxy.remoteurl` within `/etc/docker/registry/config.yml`
|
||||||
as described in the following subsection.
|
as described in the following subsection.
|
||||||
|
|
||||||
Multiple registry caches can be deployed over the same back-end. A single
|
Multiple registry caches can be deployed over the same back-end. A single
|
||||||
|
@ -107,7 +107,7 @@ proxy:
|
||||||
|
|
||||||
> **Warning**: For the scheduler to clean up old entries, `delete` must
|
> **Warning**: For the scheduler to clean up old entries, `delete` must
|
||||||
> be enabled in the registry configuration. See
|
> be enabled in the registry configuration. See
|
||||||
> [Registry Configuration](../about/configuration.md) for more details.
|
> [Registry Configuration](/about/configuration) for more details.
|
||||||
|
|
||||||
### Configure the Docker daemon
|
### Configure the Docker daemon
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ mechanism fronting their internal http portal.
|
||||||
|
|
||||||
If you just want authentication for your registry, and are happy maintaining
|
If you just want authentication for your registry, and are happy maintaining
|
||||||
users access separately, you should really consider sticking with the native
|
users access separately, you should really consider sticking with the native
|
||||||
[basic auth registry feature](../about/deploying.md#native-basic-auth).
|
[basic auth registry feature](/about/deploying#native-basic-auth).
|
||||||
|
|
||||||
### Solution
|
### Solution
|
||||||
|
|
||||||
|
|
|
@ -6,7 +6,7 @@ keywords: registry, service, images, repository, json
|
||||||
|
|
||||||
# Docker Registry Reference
|
# Docker Registry Reference
|
||||||
|
|
||||||
* [HTTP API V2](api.md)
|
* [HTTP API V2](api)
|
||||||
* [Storage Driver](../storage-drivers/_index.md)
|
* [Storage Driver](/storage-drivers/)
|
||||||
* [Token Authentication Specification](auth/token.md)
|
* [Token Authentication Specification](auth/token)
|
||||||
* [Token Authentication Implementation](auth/jwt.md)
|
* [Token Authentication Implementation](auth/jwt)
|
||||||
|
|
|
@ -416,7 +416,7 @@ reference may include a tag or digest.
|
||||||
|
|
||||||
The client should include an Accept header indicating which manifest content
|
The client should include an Accept header indicating which manifest content
|
||||||
types it supports. For more details on the manifest format and content types,
|
types it supports. For more details on the manifest format and content types,
|
||||||
see [Image Manifest Version 2, Schema 2](manifest-v2-2.md).
|
see [Image Manifest Version 2, Schema 2](../manifest-v2-2).
|
||||||
In a successful response, the Content-Type header will indicate which manifest type is being returned.
|
In a successful response, the Content-Type header will indicate which manifest type is being returned.
|
||||||
|
|
||||||
A `404 Not Found` response will be returned if the image is unknown to the
|
A `404 Not Found` response will be returned if the image is unknown to the
|
||||||
|
@ -840,7 +840,7 @@ Content-Type: <manifest media type>
|
||||||
The `name` and `reference` fields of the response body must match those
|
The `name` and `reference` fields of the response body must match those
|
||||||
specified in the URL. The `reference` field may be a "tag" or a "digest". The
|
specified in the URL. The `reference` field may be a "tag" or a "digest". The
|
||||||
content type should match the type of the manifest being uploaded, as specified
|
content type should match the type of the manifest being uploaded, as specified
|
||||||
in [Image Manifest Version 2, Schema 2](manifest-v2-2.md).
|
in [Image Manifest Version 2, Schema 2](../manifest-v2-2).
|
||||||
|
|
||||||
If there is a problem with pushing the manifest, a relevant 4xx response will
|
If there is a problem with pushing the manifest, a relevant 4xx response will
|
||||||
be returned with a JSON error message. Please see the
|
be returned with a JSON error message. Please see the
|
||||||
|
@ -1088,7 +1088,7 @@ response will be issued instead.
|
||||||
|
|
||||||
Accept: application/vnd.docker.distribution.manifest.v2+json
|
Accept: application/vnd.docker.distribution.manifest.v2+json
|
||||||
|
|
||||||
> for more details, see: [compatibility](../about/compatibility.md#content-addressable-storage-cas)
|
> for more details, see: [compatibility](/about/compatibility#content-addressable-storage-cas)
|
||||||
|
|
||||||
## Detail
|
## Detail
|
||||||
|
|
||||||
|
|
|
@ -12,7 +12,7 @@ reference for the protocol and HTTP endpoints described here.
|
||||||
|
|
||||||
**Note**: Not all token servers implement oauth2. If the request to the endpoint
|
**Note**: Not all token servers implement oauth2. If the request to the endpoint
|
||||||
returns `404` using the HTTP `POST` method, refer to
|
returns `404` using the HTTP `POST` method, refer to
|
||||||
[Token Documentation](token.md) for using the HTTP `GET` method supported by all
|
[Token Documentation](../token) for using the HTTP `GET` method supported by all
|
||||||
token servers.
|
token servers.
|
||||||
|
|
||||||
## Refresh token format
|
## Refresh token format
|
||||||
|
|
|
@ -144,7 +144,7 @@ Each JWT access token may only have a single subject and audience but multiple
|
||||||
resource scopes. The subject and audience are put into standard JWT fields
|
resource scopes. The subject and audience are put into standard JWT fields
|
||||||
`sub` and `aud`. The resource scope is put into the `access` field. The
|
`sub` and `aud`. The resource scope is put into the `access` field. The
|
||||||
structure of the access field can be seen in the
|
structure of the access field can be seen in the
|
||||||
[jwt documentation](jwt.md).
|
[jwt documentation](../jwt).
|
||||||
|
|
||||||
## Refresh Tokens
|
## Refresh Tokens
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ keywords: registry, on-prem, images, tags, repository, distribution, Bearer auth
|
||||||
|
|
||||||
This document outlines the v2 Distribution registry authentication scheme:
|
This document outlines the v2 Distribution registry authentication scheme:
|
||||||
|
|
||||||
![v2 registry auth](/distribution/images/v2-registry-auth.png)
|
![v2 registry auth](../../../images/v2-registry-auth.png)
|
||||||
|
|
||||||
1. Attempt to begin a push/pull operation with the registry.
|
1. Attempt to begin a push/pull operation with the registry.
|
||||||
2. If the registry requires authorization it will return a `401 Unauthorized`
|
2. If the registry requires authorization it will return a `401 Unauthorized`
|
||||||
|
@ -188,7 +188,7 @@ https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba
|
||||||
|
|
||||||
The token server should first attempt to authenticate the client using any
|
The token server should first attempt to authenticate the client using any
|
||||||
authentication credentials provided with the request. From Docker 1.11 the
|
authentication credentials provided with the request. From Docker 1.11 the
|
||||||
Docker engine supports both Basic Authentication and [OAuth2](oauth.md) for
|
Docker engine supports both Basic Authentication and [OAuth2](../oauth) for
|
||||||
getting tokens. Docker 1.10 and before, the registry client in the Docker Engine
|
getting tokens. Docker 1.10 and before, the registry client in the Docker Engine
|
||||||
only supports Basic Authentication. If an attempt to authenticate to the token
|
only supports Basic Authentication. If an attempt to authenticate to the token
|
||||||
server fails, the token server should return a `401 Unauthorized` response
|
server fails, the token server should return a `401 Unauthorized` response
|
||||||
|
|
|
@ -71,7 +71,7 @@ image manifest based on the Content-Type returned in the HTTP response.
|
||||||
- **`digest`** *string*
|
- **`digest`** *string*
|
||||||
|
|
||||||
The digest of the content, as defined by the
|
The digest of the content, as defined by the
|
||||||
[Registry V2 HTTP API Specification](api.md#digest-parameter).
|
[Registry V2 HTTP API Specificiation](../api#digest-parameter).
|
||||||
|
|
||||||
- **`platform`** *object*
|
- **`platform`** *object*
|
||||||
|
|
||||||
|
@ -187,7 +187,7 @@ image. It's the direct replacement for the schema-1 manifest.
|
||||||
- **`digest`** *string*
|
- **`digest`** *string*
|
||||||
|
|
||||||
The digest of the content, as defined by the
|
The digest of the content, as defined by the
|
||||||
[Registry V2 HTTP API Specification](api.md#digest-parameter).
|
[Registry V2 HTTP API Specificiation](../api#digest-parameter).
|
||||||
|
|
||||||
- **`layers`** *array*
|
- **`layers`** *array*
|
||||||
|
|
||||||
|
@ -213,7 +213,7 @@ image. It's the direct replacement for the schema-1 manifest.
|
||||||
- **`digest`** *string*
|
- **`digest`** *string*
|
||||||
|
|
||||||
The digest of the content, as defined by the
|
The digest of the content, as defined by the
|
||||||
[Registry V2 HTTP API Specification](api.md#digest-parameter).
|
[Registry V2 HTTP API Specificiation](../api#digest-parameter).
|
||||||
|
|
||||||
- **`urls`** *array*
|
- **`urls`** *array*
|
||||||
|
|
||||||
|
|
|
@@ -20,22 +20,7 @@ An implementation of the `storagedriver.StorageDriver` interface which uses [Mic
 ## Related information

-* To get information about Azure Blob Storage, see [the official docs](https://azure.microsoft.com/en-us/services/storage/).
-* You can use the Azure [Blob Service REST API](https://docs.microsoft.com/en-us/rest/api/storageservices/Blob-Service-REST-API) to [create a storage container](https://docs.microsoft.com/en-us/rest/api/storageservices/Create-Container).
-
-## Azure identity
-
-In order to use a managed identity to access Azure Blob Storage you can use [Microsoft Bicep](https://learn.microsoft.com/en-us/azure/templates/microsoft.app/managedenvironments/storages?pivots=deployment-language-bicep).
-
-The following will configure credentials that will be used by the Azure storage driver to construct an Azure identity that will be used to access the blob storage:
-```
-properties: {
-  azure: {
-    accountname: accountname
-    container: containername
-    credentials: {
-      type: default
-    }
-  }
-}
-```
+* To get information about
+[azure-blob-storage](https://azure.microsoft.com/en-us/services/storage/), visit
+the Microsoft website.
+* You can use Microsoft's [Blob Service REST API](https://docs.microsoft.com/en-us/rest/api/storageservices/Blob-Service-REST-API) to [create a storage container](https://docs.microsoft.com/en-us/rest/api/storageservices/Create-Container).
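For comparison, the registry-side Azure storage configuration that relies on a default credential chain (such as a managed identity) might look like the sketch below; the account and container names are placeholders and the exact `credentials` keys should be checked against the Azure driver reference:

```yaml
storage:
  azure:
    accountname: myregistrystorage      # placeholder storage account
    container: registrycontainer        # placeholder blob container
    credentials:
      type: default                     # assume the default Azure credential chain, e.g. managed identity
```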
|
@ -17,8 +17,4 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Goog
|
||||||
|
|
||||||
{{< hint type=note >}}
|
{{< hint type=note >}}
|
||||||
Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).
|
Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).
|
||||||
|
|
||||||
To use redirects with default credentials assigned to a virtual machine you have to enable "IAM Service Account Credentials API" and grant `iam.serviceAccounts.signBlob` permission on the used service account.
|
|
||||||
|
|
||||||
To use redirects with default credentials from Google Cloud CLI, in addition to the permissions mentioned above, you have to [impersonate the service account intended to be used by the registry](https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account).
|
|
||||||
{{< /hint >}}
|
{{< /hint >}}
|
||||||
|
|
|
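A minimal sketch of a GCS storage block that relies on Application Default Credentials rather than a key file might look like this; the bucket name and root directory are placeholders, and `keyfile` is simply omitted:

```yaml
storage:
  gcs:
    bucket: my-registry-bucket   # placeholder bucket name
    rootdirectory: /registry     # placeholder prefix inside the bucket
    # No keyfile: the driver falls back to Application Default Credentials.
```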
@ -7,7 +7,7 @@ title: In-memory storage driver (testing only)
|
||||||
For purely tests purposes, you can use the `inmemory` storage driver. This
|
For purely tests purposes, you can use the `inmemory` storage driver. This
|
||||||
driver is an implementation of the `storagedriver.StorageDriver` interface which
|
driver is an implementation of the `storagedriver.StorageDriver` interface which
|
||||||
uses local memory for object storage. If you would like to run a registry from
|
uses local memory for object storage. If you would like to run a registry from
|
||||||
volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk.
|
volatile memory, use the [`filesystem` driver](../filesystem) on a ramdisk.
|
||||||
|
|
||||||
{{< hint type=important >}}
|
{{< hint type=important >}}
|
||||||
This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.
|
This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.
|
||||||
|
|
|
@@ -1,15 +0,0 @@
----
-description: Explains how to use storage middleware
-keywords: registry, on-prem, images, tags, repository, distribution, storage drivers, advanced
-title: Storage middleware
----
-
-This document describes the registry storage middleware.
-
-## Provided middleware
-
-This storage driver package comes bundled with several middleware options:
-
-- cloudfront
-- redirect
-- [rewrite](rewrite): Partially rewrites the URL returned by the storage driver.
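As an illustration of how a bundled middleware is wired up, a CloudFront storage middleware block might be sketched as follows; the domain, key path, and key-pair ID are placeholders and the option names should be confirmed against the configuration reference:

```yaml
middleware:
  storage:
    - name: cloudfront
      options:
        baseurl: https://d111111abcdef8.cloudfront.net   # placeholder CloudFront distribution domain
        privatekey: /etc/registry/cloudfront/pk.pem      # placeholder CloudFront signing key
        keypairid: APKAEXAMPLEID                         # placeholder key-pair ID
        duration: 3600s                                  # how long signed URLs stay valid
```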
@@ -1,32 +0,0 @@
----
-description: Explains how to use the rewrite storage middleware
-keywords: registry, service, driver, images, storage, middleware, rewrite
-title: Rewrite middleware
----
-
-A storage middleware which allows rewriting the URL returned by the storage driver.
-
-For example, it can be used to rewrite the Blob Storage URL returned by the Azure Blob Storage driver to use Azure CDN.
-
-## Parameters
-
-* `scheme` (optional): Rewrite the returned URL scheme (if set).
-* `host` (optional): Rewrite the returned URL host (if set).
-* `trimpathprefix` (optional): Trim the prefix from the returned URL path (if set).
-
-## Example configuration
-
-```yaml
-storage:
-  azure:
-    accountname: "ACCOUNT_NAME"
-    accountkey: "******"
-    container: container-name
-middleware:
-  storage:
-    - name: rewrite
-      options:
-        scheme: https
-        host: example-cdn-endpoint.azurefd.net
-        trimpathprefix: /container-name
-```
@@ -15,7 +15,7 @@ Amazon S3 or S3 compatible services for object storage.
 | `secretkey` | no | Your AWS Secret Key. If you use [IAM roles](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), omit to fetch temporary credentials from IAM. |
 | `region` | yes | The AWS region in which your bucket exists. |
 | `regionendpoint` | no | Endpoint for S3 compatible storage services (Minio, etc). |
-| `forcepathstyle` | no | To enable path-style addressing when the value is set to `true`. The default is `false`. |
+| `forcepathstyle` | no | To enable path-style addressing when the value is set to `true`. The default is `true`. |
 | `bucket` | yes | The bucket name in which you want to store the registry's data. |
 | `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
 | `keyid` | no | Optional KMS key ID to use for encryption (encrypt must be true, or this parameter is ignored). The default is `none`. |

@@ -43,7 +43,7 @@ Amazon S3 or S3 compatible services for object storage.

 `regionendpoint`: (optional) Endpoint URL for S3 compatible APIs. This should not be provided when using Amazon S3.

-`forcepathstyle`: (optional) Force path style for S3 compatible APIs. Some manufacturers only support force path style, while others only support DNS based bucket routing. Amazon S3 supports both. The value of this parameter applies regardless of the region settings.
+`forcepathstyle`: (optional) The force path style for S3 compatible APIs. Some manufacturers only support force path style, while others only support DNS based bucket routing. Amazon S3 supports both.

 `bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization.

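Putting these parameters together, a sketch of an S3-compatible (for example MinIO-style) storage configuration might look like the following; the endpoint, bucket, and credentials are placeholders:

```yaml
storage:
  s3:
    regionendpoint: https://minio.example.internal:9000   # placeholder S3-compatible endpoint
    forcepathstyle: true                                  # path-style addressing for non-AWS endpoints
    region: us-east-1
    bucket: registry-data                                 # placeholder bucket; must already exist
    accesskey: AKIAEXAMPLE                                # placeholder credentials
    secretkey: "********"
    encrypt: false
```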
@@ -5,7 +5,7 @@ This repository provides container images for the Open Source Registry implement

 <img src="https://raw.githubusercontent.com/distribution/distribution/main/distribution-logo.svg" width="200px" />

-[![Build Status](https://github.com/distribution/distribution/workflows/build/badge.svg?branch=main&event=push)](https://github.com/distribution/distribution/actions/workflows/build.yml?query=workflow%3Abuild)
+[![Build Status](https://github.com/distribution/distribution/workflows/CI/badge.svg?branch=main&event=push)](https://github.com/distribution/distribution/actions?query=workflow%3ACI)
 [![OCI Conformance](https://github.com/distribution/distribution/workflows/conformance/badge.svg)](https://github.com/distribution/distribution/actions?query=workflow%3Aconformance)
 [![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE)

@@ -31,12 +31,12 @@ docker tag alpine localhost:5000/alpine
 docker push localhost:5000/alpine
 ```

-⚠️ Beware the default configuration uses the [`filesystem` storage driver](https://github.com/distribution/distribution/blob/main/docs/content/storage-drivers/filesystem.md)
+⚠️ Beware the default configuration uses the [`filesystem` storage driver](https://github.com/distribution/distribution/blob/main/docs/storage-drivers/filesystem.md)
 and the above example command does not mount a local filesystem volume into the running container.
 If you wish to mount the local filesystem to the `rootdirectory` of the
 `filesystem` storage driver, run the following command:
 ```
-docker run -d -p 5000:5000 -v $PWD/FS/PATH:/var/lib/registry --restart always --name registry distribution/distribution:edge
+docker run -d -p 5000:5000 $PWD/FS/PATH:/var/lib/registry --restart always --name registry distribution/distribution:edge
 ```

 ### Custom configuration

@@ -44,7 +44,7 @@ docker run -d -p 5000:5000 -v $PWD/FS/PATH:/var/lib/registry --restart always --
 If you don't want to use the default configuration file, you can supply
 your own custom configuration file as follows:
 ```
-docker run -d -p 5000:5000 -v $PWD/PATH/TO/config.yml:/etc/distribution/config.yml --restart always --name registry distribution/distribution:edge
+docker run -d -p 5000:5000 $PWD/PATH/TO/config.yml:/etc/docker/registry/config.yml --restart always --name registry distribution/distribution:edge
 ```

 ## Communication

@ -1,4 +1,4 @@
|
||||||
baseURL: https://distribution.github.io/distribution
|
baseURL: /
|
||||||
languageCode: en-us
|
languageCode: en-us
|
||||||
title: CNCF Distribution
|
title: CNCF Distribution
|
||||||
theme: hugo-geekdoc
|
theme: hugo-geekdoc
|
||||||
|
@ -22,7 +22,3 @@ disablePathToLower: true
|
||||||
params:
|
params:
|
||||||
geekdocRepo: "https://github.com/distribution/distribution"
|
geekdocRepo: "https://github.com/distribution/distribution"
|
||||||
geekdocEditPath: edit/main/docs
|
geekdocEditPath: edit/main/docs
|
||||||
geekdocLegalNotice: "https://www.linuxfoundation.org/legal/trademark-usage"
|
|
||||||
geekdocContentLicense:
|
|
||||||
name: CC BY 4.0
|
|
||||||
link: https://creativecommons.org/licenses/by/4.0/
|
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
footer_legal_notice: Trademarks
|
|
|
@ -1,5 +0,0 @@
|
||||||
{{- if (strings.HasPrefix .Destination "http") -}}
|
|
||||||
<a href="{{ safe.URL .Destination }}" target="_blank">{{ safe.HTML .Text }}</a>
|
|
||||||
{{- else -}}
|
|
||||||
<a href="{{ ref .Page .Destination | safe.URL }}">{{ safe.HTML .Text }}</a>
|
|
||||||
{{- end -}}
|
|
go.mod

@@ -1,26 +1,31 @@
 module github.com/distribution/distribution/v3

-go 1.22.5
+go 1.21
+
+toolchain go1.21.4

 require (
 	cloud.google.com/go/storage v1.30.1
-	github.com/AdaLogics/go-fuzz-headers v0.0.0-20221103172237-443f56ff4ba8
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230825064515-46a214d065f8
+	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
+	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
 	github.com/aws/aws-sdk-go v1.48.10
 	github.com/bshuster-repo/logrus-logstash-hook v1.0.0
 	github.com/coreos/go-systemd/v22 v22.5.0
-	github.com/distribution/reference v0.6.0
+	github.com/distribution/reference v0.5.0
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
 	github.com/docker/go-metrics v0.0.1
-	github.com/go-jose/go-jose/v4 v4.0.2
+	github.com/go-jose/go-jose/v3 v3.0.1
 	github.com/google/uuid v1.6.0
-	github.com/gorilla/handlers v1.5.2
+	github.com/gorilla/handlers v1.5.1
 	github.com/gorilla/mux v1.8.1
 	github.com/hashicorp/golang-lru/arc/v2 v2.0.5
 	github.com/klauspost/compress v1.17.4
-	github.com/mitchellh/mapstructure v1.5.0
+	github.com/mitchellh/mapstructure v1.1.2
+	github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
 	github.com/redis/go-redis/extra/redisotel/v9 v9.0.5

@@ -28,14 +33,10 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/stretchr/testify v1.9.0
+	github.com/testcontainers/testcontainers-go v0.29.1
 	go.opentelemetry.io/contrib/exporters/autoexport v0.46.1
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
-	go.opentelemetry.io/otel v1.21.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0
 	go.opentelemetry.io/otel/sdk v1.21.0
-	go.opentelemetry.io/otel/trace v1.21.0
-	golang.org/x/crypto v0.24.0
-	golang.org/x/net v0.26.0
+	golang.org/x/crypto v0.17.0
 	golang.org/x/oauth2 v0.11.0
 	google.golang.org/api v0.126.0
 	gopkg.in/yaml.v2 v2.4.0

@@ -46,30 +47,83 @@ require (
 	cloud.google.com/go/compute v1.23.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v1.1.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+	dario.cat/mergo v1.0.0 // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240327095603-491a47e7fe24 // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb // indirect
+	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
+	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
+	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
+	github.com/Microsoft/hcsshim v0.11.4 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+	github.com/benbjohnson/clock v1.1.0 // indirect
+	github.com/containerd/containerd v1.7.12 // indirect
+	github.com/containerd/log v0.1.0 // indirect
+	github.com/cpuguy83/dockercfg v0.3.1 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+	github.com/docker/docker v25.0.3+incompatible // indirect
+	github.com/docker/go-connections v0.5.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/hashicorp/golang-lru v0.6.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/moby/patternmatcher v0.6.0 // indirect
+	github.com/moby/sys/sequential v0.5.0 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
+	github.com/moby/term v0.5.0 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
+	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce // indirect
+	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/shirou/gopsutil/v3 v3.23.12 // indirect
+	github.com/shoenig/go-m1cpu v0.1.6 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
+	github.com/twmb/murmur3 v1.1.8 // indirect
+	github.com/urfave/cli v1.22.12 // indirect
+	github.com/yusufpapurcu/wmi v1.2.3 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.24.0
+	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
+	golang.org/x/mod v0.16.0 // indirect
+	golang.org/x/tools v0.13.0 // indirect
+)
+
+require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/s2a-go v0.1.4 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
-	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_golang v1.17.0 // indirect; updated to latest
 	github.com/prometheus/client_model v0.5.0 // indirect

@@ -78,6 +132,8 @@ require (
 	github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
+	go.opentelemetry.io/otel v1.21.0
 	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect

@@ -85,12 +141,15 @@ require (
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
 	go.opentelemetry.io/otel/exporters/prometheus v0.44.0 // indirect
 	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect
 	go.opentelemetry.io/otel/metric v1.21.0 // indirect
 	go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
-	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.21.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/net v0.18.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/sys v0.16.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
@@ -72,7 +72,7 @@ type Manager interface {

 	// AddResponse adds the response to the challenge
 	// manager. The challenges will be parsed out of
-	// the WWW-Authenticate headers and added to the
+	// the WWW-Authenicate headers and added to the
 	// URL which was produced the response. If the
 	// response was authorized, any challenges for the
 	// endpoint will be cleared.
@@ -29,9 +29,9 @@ var (
 const defaultClientID = "registry-client"

 // AuthenticationHandler is an interface for authorizing a request from
-// params from a "WWW-Authenticate" header for a single scheme.
+// params from a "WWW-Authenicate" header for a single scheme.
 type AuthenticationHandler interface {
-	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
+	// Scheme returns the scheme as expected from the "WWW-Authenicate" header.
 	Scheme() string

 	// AuthorizeRequest adds the authorization header to a request (if needed)
@@ -46,14 +46,8 @@ func parseHTTPErrorResponse(resp *http.Response) error {
 	}

 	statusCode := resp.StatusCode

-	// A HEAD request for example validly does not contain any body, while
-	// still returning a JSON content-type.
-	if len(body) == 0 {
-		return makeError(statusCode, "")
-	}
-
 	ctHeader := resp.Header.Get("Content-Type")

 	if ctHeader == "" {
 		return makeError(statusCode, string(body))
 	}
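The hunk above shows a guard that only upstream main has: a HEAD request can validly return a JSON Content-Type with an empty body, so the body length has to be checked before the error payload is decoded. A minimal, self-contained sketch of that behaviour; the `makeError` helper here is a stand-in for the one used in the real file, not the registry's actual implementation:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// parseErrorBody mirrors the guard kept on the main side: if the body is
// empty (as for a HEAD response), return an error built from the status
// alone instead of trying to decode JSON that isn't there.
func parseErrorBody(resp *http.Response) error {
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if len(body) == 0 {
		return makeError(resp.StatusCode, "")
	}
	return makeError(resp.StatusCode, string(body))
}

// makeError is a hypothetical stand-in that simply wraps the status and detail.
func makeError(status int, detail string) error {
	return fmt.Errorf("unexpected status %d: %s", status, detail)
}

func main() {
	// A HEAD-style response: JSON content type, no body at all.
	resp := &http.Response{
		StatusCode: 401,
		Header:     http.Header{"Content-Type": []string{"application/json; charset=utf-8"}},
		Body:       io.NopCloser(strings.NewReader("")),
	}
	fmt.Println(parseErrorBody(resp))
}
```

Without the length check, the empty body falls through to the JSON decoding path and yields a less useful error, which is what the removed test below exercised.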
@@ -57,22 +57,6 @@ func TestHandleHTTPResponseError401WithInvalidBody(t *testing.T) {
 	}
 }

-func TestHandleHTTPResponseError401WithNoBody(t *testing.T) {
-	json := ""
-	response := &http.Response{
-		Status:     "401 Unauthorized",
-		StatusCode: 401,
-		Body:       nopCloser{bytes.NewBufferString(json)},
-		Header:     http.Header{"Content-Type": []string{"application/json; charset=utf-8"}},
-	}
-	err := HandleHTTPResponseError(response)
-
-	expectedMsg := "unauthorized: "
-	if !strings.Contains(err.Error(), expectedMsg) {
-		t.Errorf("Expected %q, got: %q", expectedMsg, err.Error())
-	}
-}
-
 func TestHandleHTTPResponseErrorExpectedStatusCode400ValidBody(t *testing.T) {
 	json := `{"errors":[{"code":"DIGEST_INVALID","message":"provided digest does not match"}]}`
 	response := &http.Response{
@@ -18,13 +18,13 @@ import (

 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/internal/dcontext"
+	"github.com/distribution/distribution/v3/manifest"
 	"github.com/distribution/distribution/v3/manifest/ocischema"
 	"github.com/distribution/distribution/v3/registry/api/errcode"
 	"github.com/distribution/distribution/v3/testutil"
 	"github.com/distribution/reference"
 	"github.com/google/uuid"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -928,8 +928,10 @@ func newRandomOCIManifest(t *testing.T, blobCount int) (*ocischema.Manifest, dig
 	}

 	m := ocischema.Manifest{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: v1.MediaTypeImageManifest,
+		Versioned: manifest.Versioned{
+			SchemaVersion: 2,
+			MediaType:     v1.MediaTypeImageManifest,
+		},
 		Config: distribution.Descriptor{
 			Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
 			Size:   123,
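Many of the hunks that follow are the same mechanical migration: upstream main embeds `specs.Versioned` (schema version only) and keeps `MediaType` as a field on each manifest struct, while the fork's older base embeds `manifest.Versioned`, which carries both. A self-contained sketch with renamed stand-in types (not the real distribution or image-spec types), showing that the two layouts marshal to the same JSON:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// oldVersioned stands in for manifest.Versioned on the tcl/master side:
// schema version and media type live together in the embedded struct.
type oldVersioned struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType,omitempty"`
}

// newVersioned stands in for specs.Versioned on the main side: only the
// schema version is embedded and MediaType moves to the manifest struct.
type newVersioned struct {
	SchemaVersion int `json:"schemaVersion"`
}

type oldManifest struct {
	oldVersioned
	Config string `json:"config"`
}

type newManifest struct {
	newVersioned
	MediaType string `json:"mediaType,omitempty"`
	Config    string `json:"config"`
}

func main() {
	a, _ := json.Marshal(oldManifest{oldVersioned{2, "application/vnd.oci.image.manifest.v1+json"}, "cfg"})
	b, _ := json.Marshal(newManifest{newVersioned{2}, "application/vnd.oci.image.manifest.v1+json", "cfg"})
	// Embedded anonymous structs are flattened by encoding/json, so both
	// encode schemaVersion, mediaType and config identically.
	fmt.Println(string(a))
	fmt.Println(string(b))
}
```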
@@ -26,6 +26,11 @@ var (
 	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
 )

+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+//
+// Deprecated: use [io.ReadSeekCloser].
+type ReadSeekCloser = io.ReadSeekCloser
+
 // NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
 // request. When seeking and starting a read from a non-zero offset
 // the a "Range" header will be added which sets the offset.
@@ -8,7 +8,6 @@ import (
 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/manifest"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -19,37 +18,33 @@ const (

 // SchemaVersion provides a pre-initialized version structure for this
 // packages version of the manifest.
-//
-// Deprecated: use [specs.Versioned] and set MediaType on the manifest
-// to [MediaTypeManifestList].
-//
-//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
 var SchemaVersion = manifest.Versioned{
 	SchemaVersion: 2,
 	MediaType:     MediaTypeManifestList,
 }

 func init() {
-	if err := distribution.RegisterManifestSchema(MediaTypeManifestList, unmarshalManifestList); err != nil {
-		panic(fmt.Sprintf("Unable to register manifest: %s", err))
-	}
-}
-
-func unmarshalManifestList(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
-	m := &DeserializedManifestList{}
-	if err := m.UnmarshalJSON(b); err != nil {
-		return nil, distribution.Descriptor{}, err
-	}
-
-	if m.MediaType != MediaTypeManifestList {
-		return nil, distribution.Descriptor{}, fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", MediaTypeManifestList, m.MediaType)
-	}
-
-	return m, distribution.Descriptor{
-		Digest:    digest.FromBytes(b),
-		Size:      int64(len(b)),
-		MediaType: MediaTypeManifestList,
-	}, nil
-}
+	manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifestList)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		if m.MediaType != MediaTypeManifestList {
+			err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'",
+				MediaTypeManifestList, m.MediaType)
+
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}

 // PlatformSpec specifies a platform where a particular image manifest is
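Both sides of the init hunk above do the same thing: they hand `distribution.RegisterManifestSchema` a callback that turns raw bytes into a manifest plus its descriptor; only the shape of the callback differs. A minimal sketch of that registration pattern, using local stand-in types and a hypothetical media type rather than the package's real ones:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Descriptor and Manifest are simplified stand-ins so the sketch compiles
// on its own; they are not the distribution types.
type Descriptor struct {
	MediaType string
	Size      int64
}

type Manifest interface {
	Payload() (mediaType string, payload []byte, err error)
}

// UnmarshalFunc mirrors the callback shape seen in the init hunks above.
type UnmarshalFunc func(b []byte) (Manifest, Descriptor, error)

var schemas = map[string]UnmarshalFunc{}

// RegisterManifestSchema records one unmarshal function per media type and
// rejects duplicate registrations, as the real registry does at init time.
func RegisterManifestSchema(mediaType string, fn UnmarshalFunc) error {
	if _, dup := schemas[mediaType]; dup {
		return fmt.Errorf("media type already registered: %s", mediaType)
	}
	schemas[mediaType] = fn
	return nil
}

// toyManifest is a hypothetical manifest type used only for this sketch.
type toyManifest struct {
	MediaType string `json:"mediaType"`
	raw       []byte
}

func (m *toyManifest) Payload() (string, []byte, error) { return m.MediaType, m.raw, nil }

func main() {
	const mediaType = "application/vnd.example.manifest.v1+json"
	err := RegisterManifestSchema(mediaType, func(b []byte) (Manifest, Descriptor, error) {
		m := new(toyManifest)
		if err := json.Unmarshal(b, m); err != nil {
			return nil, Descriptor{}, err
		}
		m.raw = b
		return m, Descriptor{MediaType: mediaType, Size: int64(len(b))}, nil
	})
	if err != nil {
		panic(err)
	}

	payload := []byte(`{"mediaType":"application/vnd.example.manifest.v1+json"}`)
	m, desc, err := schemas[mediaType](payload)
	fmt.Println(m, desc, err)
}
```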
@@ -90,10 +85,7 @@ type ManifestDescriptor struct {

 // ManifestList references manifests for various platforms.
 type ManifestList struct {
-	specs.Versioned
-
-	// MediaType is the media type of this schema.
-	MediaType string `json:"mediaType,omitempty"`
+	manifest.Versioned

 	// Manifests references a list of manifests
 	Manifests []ManifestDescriptor `json:"manifests"`

@@ -136,8 +128,10 @@ func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestLis
 // fromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly
 func fromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) {
 	m := ManifestList{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: mediaType,
+		Versioned: manifest.Versioned{
+			SchemaVersion: SchemaVersion.SchemaVersion,
+			MediaType:     mediaType,
+		},
 	}

 	m.Manifests = make([]ManifestDescriptor, len(descriptors))

@@ -182,14 +176,7 @@ func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
 // Payload returns the raw content of the manifest list. The contents can be
 // used to calculate the content identifier.
 func (m DeserializedManifestList) Payload() (string, []byte, error) {
-	var mediaType string
-	if m.MediaType == "" {
-		mediaType = v1.MediaTypeImageIndex
-	} else {
-		mediaType = m.MediaType
-	}
-
-	return mediaType, m.canonical, nil
+	return m.MediaType, m.canonical, nil
 }

 // validateManifestList returns an error if the byte slice is invalid JSON or if it
@@ -5,8 +5,8 @@ import (
 	"errors"

 	"github.com/distribution/distribution/v3"
+	"github.com/distribution/distribution/v3/manifest"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -32,7 +32,7 @@ type Builder struct {
 // NewManifestBuilder is used to build new manifests for the current schema
 // version. It takes a BlobService so it can publish the configuration blob
 // as part of the Build process, and annotations.
-func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) *Builder {
+func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder {
 	mb := &Builder{
 		bs:         bs,
 		configJSON: make([]byte, len(configJSON)),

@@ -58,8 +58,10 @@ func (mb *Builder) SetMediaType(mediaType string) error {
 // Build produces a final manifest from the given references.
 func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
 	m := Manifest{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: mb.mediaType,
+		Versioned: manifest.Versioned{
+			SchemaVersion: 2,
+			MediaType:     mb.mediaType,
+		},
 		Layers:      make([]distribution.Descriptor, len(mb.layers)),
 		Annotations: mb.annotations,
 	}

@@ -94,8 +96,8 @@ func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
 }

 // AppendReference adds a reference to the current ManifestBuilder.
-func (mb *Builder) AppendReference(ref distribution.Descriptor) error {
-	mb.layers = append(mb.layers, ref)
+func (mb *Builder) AppendReference(d distribution.Describable) error {
+	mb.layers = append(mb.layers, d.Descriptor())
 	return nil
 }
@@ -8,56 +8,51 @@ import (
 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/manifest"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

 // IndexSchemaVersion provides a pre-initialized version structure for OCI Image
 // Indices.
-//
-// Deprecated: use [specs.Versioned] and set MediaType on the manifest
-// to [v1.MediaTypeImageIndex].
-//
-//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
 var IndexSchemaVersion = manifest.Versioned{
 	SchemaVersion: 2,
 	MediaType:     v1.MediaTypeImageIndex,
 }

 func init() {
-	if err := distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, unmarshalImageIndex); err != nil {
-		panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err))
-	}
-}
-
-func unmarshalImageIndex(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
-	if err := validateIndex(b); err != nil {
-		return nil, distribution.Descriptor{}, err
-	}
-
-	m := &DeserializedImageIndex{}
-	if err := m.UnmarshalJSON(b); err != nil {
-		return nil, distribution.Descriptor{}, err
-	}
-
-	if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex {
-		return nil, distribution.Descriptor{}, fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", v1.MediaTypeImageIndex, m.MediaType)
-	}
-
-	return m, distribution.Descriptor{
-		MediaType:   v1.MediaTypeImageIndex,
-		Digest:      digest.FromBytes(b),
-		Size:        int64(len(b)),
-		Annotations: m.Annotations,
-	}, nil
-}
+	imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		if err := validateIndex(b); err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+		m := new(DeserializedImageIndex)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex {
+			err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'",
+				v1.MediaTypeImageIndex, m.MediaType)
+
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{
+			MediaType:   v1.MediaTypeImageIndex,
+			Digest:      dgst,
+			Size:        int64(len(b)),
+			Annotations: m.Annotations,
+		}, err
+	}
+	err := distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err))
+	}
+}

 // ImageIndex references manifests for various platforms.
 type ImageIndex struct {
-	specs.Versioned
-
-	// MediaType is the media type of this schema.
-	MediaType string `json:"mediaType,omitempty"`
+	manifest.Versioned

 	// Manifests references a list of manifests
 	Manifests []distribution.Descriptor `json:"manifests"`

@@ -93,8 +88,10 @@ func FromDescriptors(descriptors []distribution.Descriptor, annotations map[stri
 // fromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly
 func fromDescriptorsWithMediaType(descriptors []distribution.Descriptor, annotations map[string]string, mediaType string) (_ *DeserializedImageIndex, err error) {
 	m := ImageIndex{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: mediaType,
+		Versioned: manifest.Versioned{
+			SchemaVersion: IndexSchemaVersion.SchemaVersion,
+			MediaType:     mediaType,
+		},
 		Annotations: annotations,
 	}
@@ -8,52 +8,44 @@ import (
 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/manifest"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

 // SchemaVersion provides a pre-initialized version structure for OCI Image
-// Manifests.
-//
-// Deprecated: use [specs.Versioned] and set MediaType on the manifest
-// to [v1.MediaTypeImageManifest].
-//
-//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
+// Manifests
 var SchemaVersion = manifest.Versioned{
 	SchemaVersion: 2,
 	MediaType:     v1.MediaTypeImageManifest,
 }

 func init() {
-	if err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, unmarshalOCISchema); err != nil {
-		panic(fmt.Sprintf("Unable to register manifest: %s", err))
-	}
-}
-
-func unmarshalOCISchema(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
-	if err := validateManifest(b); err != nil {
-		return nil, distribution.Descriptor{}, err
-	}
-
-	m := &DeserializedManifest{}
-	if err := m.UnmarshalJSON(b); err != nil {
-		return nil, distribution.Descriptor{}, err
-	}
-
-	return m, distribution.Descriptor{
-		MediaType:   v1.MediaTypeImageManifest,
-		Digest:      digest.FromBytes(b),
-		Size:        int64(len(b)),
-		Annotations: m.Annotations,
-	}, nil
-}
+	ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		if err := validateManifest(b); err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+		m := new(DeserializedManifest)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{
+			MediaType:   v1.MediaTypeImageManifest,
+			Digest:      dgst,
+			Size:        int64(len(b)),
+			Annotations: m.Annotations,
+		}, err
+	}
+	err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}

 // Manifest defines a ocischema manifest.
 type Manifest struct {
-	specs.Versioned
-
-	// MediaType is the media type of this schema.
-	MediaType string `json:"mediaType,omitempty"`
+	manifest.Versioned

 	// Config references the image configuration as a blob.
 	Config distribution.Descriptor `json:"config"`

@@ -133,7 +125,7 @@ func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {

 // Payload returns the raw content of the manifest. The contents can be used to
 // calculate the content identifier.
-func (m *DeserializedManifest) Payload() (string, []byte, error) {
+func (m DeserializedManifest) Payload() (string, []byte, error) {
 	return v1.MediaTypeImageManifest, m.canonical, nil
 }
@@ -7,8 +7,8 @@ import (
 	"testing"

 	"github.com/distribution/distribution/v3"
+	"github.com/distribution/distribution/v3/manifest"
 	"github.com/distribution/distribution/v3/manifest/manifestlist"
-	"github.com/opencontainers/image-spec/specs-go"

 	"github.com/opencontainers/go-digest"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"

@@ -42,8 +42,10 @@ const expectedManifestSerialization = `{

 func makeTestManifest(mediaType string) Manifest {
 	return Manifest{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: mediaType,
+		Versioned: manifest.Versioned{
+			SchemaVersion: 2,
+			MediaType:     mediaType,
+		},
 		Config: distribution.Descriptor{
 			MediaType: v1.MediaTypeImageConfig,
 			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
@@ -4,11 +4,10 @@ import (
 	"context"

 	"github.com/distribution/distribution/v3"
-	"github.com/opencontainers/image-spec/specs-go"
 )

-// Builder is a type for constructing manifests.
-type Builder struct {
+// builder is a type for constructing manifests.
+type builder struct {
 	// configDescriptor is used to describe configuration
 	configDescriptor distribution.Descriptor

@@ -23,8 +22,8 @@ type Builder struct {
 // NewManifestBuilder is used to build new manifests for the current schema
 // version. It takes a BlobService so it can publish the configuration blob
 // as part of the Build process.
-func NewManifestBuilder(configDescriptor distribution.Descriptor, configJSON []byte) *Builder {
-	mb := &Builder{
+func NewManifestBuilder(configDescriptor distribution.Descriptor, configJSON []byte) distribution.ManifestBuilder {
+	mb := &builder{
 		configDescriptor: configDescriptor,
 		configJSON:       make([]byte, len(configJSON)),
 	}

@@ -34,10 +33,9 @@ func NewManifestBuilder(configDescriptor distribution.Descriptor, configJSON []b
 }

 // Build produces a final manifest from the given references.
-func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
 	m := Manifest{
-		Versioned: specs.Versioned{SchemaVersion: defaultSchemaVersion},
-		MediaType: defaultMediaType,
+		Versioned: SchemaVersion,
 		Layers:    make([]distribution.Descriptor, len(mb.dependencies)),
 	}
 	copy(m.Layers, mb.dependencies)

@@ -48,12 +46,12 @@ func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
 }

 // AppendReference adds a reference to the current ManifestBuilder.
-func (mb *Builder) AppendReference(ref distribution.Descriptor) error {
-	mb.dependencies = append(mb.dependencies, ref)
+func (mb *builder) AppendReference(d distribution.Describable) error {
+	mb.dependencies = append(mb.dependencies, d.Descriptor())
 	return nil
 }

 // References returns the current references added to this builder.
-func (mb *Builder) References() []distribution.Descriptor {
+func (mb *builder) References() []distribution.Descriptor {
 	return mb.dependencies
 }
@@ -8,7 +8,6 @@ import (
 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/manifest"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 )

 const (

@@ -34,48 +33,33 @@ const (
 	MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
 )

-const (
-	defaultSchemaVersion = 2
-	defaultMediaType     = MediaTypeManifest
-)
-
 // SchemaVersion provides a pre-initialized version structure for this
 // packages version of the manifest.
-//
-// Deprecated: use [specs.Versioned] and set MediaType on the manifest
-// to [MediaTypeManifest].
-//
-//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
 var SchemaVersion = manifest.Versioned{
-	SchemaVersion: defaultSchemaVersion,
-	MediaType:     defaultMediaType,
+	SchemaVersion: 2,
+	MediaType:     MediaTypeManifest,
 }

 func init() {
-	if err := distribution.RegisterManifestSchema(defaultMediaType, unmarshalSchema2); err != nil {
-		panic(fmt.Sprintf("Unable to register manifest: %s", err))
-	}
-}
-
-func unmarshalSchema2(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
-	m := &DeserializedManifest{}
-	if err := m.UnmarshalJSON(b); err != nil {
-		return nil, distribution.Descriptor{}, err
-	}
-
-	return m, distribution.Descriptor{
-		Digest:    digest.FromBytes(b),
-		Size:      int64(len(b)),
-		MediaType: defaultMediaType,
-	}, nil
-}
+	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifest)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}

 // Manifest defines a schema2 manifest.
 type Manifest struct {
-	specs.Versioned
-
-	// MediaType is the media type of this schema.
-	MediaType string `json:"mediaType,omitempty"`
+	manifest.Versioned

 	// Config references the image configuration as a blob.
 	Config distribution.Descriptor `json:"config"`

@@ -130,8 +114,9 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
 		return err
 	}

-	if mfst.MediaType != defaultMediaType {
-		return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", defaultMediaType, mfst.MediaType)
+	if mfst.MediaType != MediaTypeManifest {
+		return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
+			MediaTypeManifest, mfst.MediaType)
 	}

 	m.Manifest = mfst
@@ -7,7 +7,7 @@ import (
 	"testing"

 	"github.com/distribution/distribution/v3"
-	"github.com/opencontainers/image-spec/specs-go"
+	"github.com/distribution/distribution/v3/manifest"
 )

 const expectedManifestSerialization = `{

@@ -29,8 +29,10 @@ const expectedManifestSerialization = `{

 func makeTestManifest(mediaType string) Manifest {
 	return Manifest{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: mediaType,
+		Versioned: manifest.Versioned{
+			SchemaVersion: 2,
+			MediaType:     mediaType,
+		},
 		Config: distribution.Descriptor{
 			MediaType: MediaTypeImageConfig,
 			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
@@ -3,8 +3,6 @@ package manifest
 // Versioned provides a struct with the manifest schemaVersion and mediaType.
 // Incoming content with unknown schema version can be decoded against this
 // struct to check the version.
-//
-// Deprecated: use [specs.Versioned] and set MediaType on the Manifest itself.
 type Versioned struct {
 	// SchemaVersion is the image manifest schema that this image follows
 	SchemaVersion int `json:"schemaVersion"`
manifests.go

@@ -26,7 +26,28 @@ type Manifest interface {
 	Payload() (mediaType string, payload []byte, err error)
 }

-// ManifestService describes operations on manifests.
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+	// Build creates the manifest from his builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	//
+	// The destination of the reference is dependent on the manifest type and
+	// the dependency type.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
 type ManifestService interface {
 	// Exists returns true if the manifest exists.
 	Exists(ctx context.Context, dgst digest.Digest) (bool, error)

@@ -48,12 +69,8 @@ type ManifestEnumerator interface {
 	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
 }

-// Describable is an interface for descriptors.
-//
-// Implementations of Describable are generally objects which can be
-// described, not simply descriptors.
+// Describable is an interface for descriptors
 type Describable interface {
-	// Descriptor returns the descriptor.
 	Descriptor() Descriptor
 }
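The ManifestBuilder interface added on the fork side collects Describable dependencies in order and replays them through References and Build; the builder hunks earlier in the diff show schema2 and ocischema returning this interface from their NewManifestBuilder constructors. A self-contained sketch of that pattern with simplified stand-in types, not the actual distribution implementations:

```go
package main

import (
	"context"
	"fmt"
)

// Descriptor and Describable are local stand-ins so the sketch compiles on
// its own; they mirror the shapes used by the interface above.
type Descriptor struct {
	MediaType string
	Size      int64
	Digest    string
}

// Descriptor returns the descriptor itself, so a plain Descriptor can be
// passed anywhere a Describable is expected.
func (d Descriptor) Descriptor() Descriptor { return d }

type Describable interface {
	Descriptor() Descriptor
}

// layerBuilder is a toy ManifestBuilder-style type: it records dependencies
// in the order they are appended and returns them from References.
type layerBuilder struct {
	dependencies []Descriptor
}

func (b *layerBuilder) AppendReference(dep Describable) error {
	b.dependencies = append(b.dependencies, dep.Descriptor())
	return nil
}

func (b *layerBuilder) References() []Descriptor { return b.dependencies }

func (b *layerBuilder) Build(ctx context.Context) ([]Descriptor, error) {
	// A real builder would assemble and serialize a manifest here; the toy
	// version just hands back the collected layer descriptors.
	out := make([]Descriptor, len(b.dependencies))
	copy(out, b.dependencies)
	return out, nil
}

func main() {
	b := &layerBuilder{}
	_ = b.AppendReference(Descriptor{MediaType: "application/vnd.example.layer", Size: 1234, Digest: "sha256:aaaa"})
	layers, _ := b.Build(context.Background())
	fmt.Println(len(layers), b.References()[0].Digest)
}
```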
@@ -4,13 +4,13 @@ import (
 	"testing"

 	"github.com/distribution/distribution/v3"
+	"github.com/distribution/distribution/v3/manifest"
 	"github.com/distribution/distribution/v3/manifest/schema2"
 	v2 "github.com/distribution/distribution/v3/registry/api/v2"
 	"github.com/distribution/reference"
 	events "github.com/docker/go-events"
 	"github.com/google/uuid"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -29,6 +29,7 @@ var (
 	}
 	request = RequestRecord{}
 	tag     = "latest"
+	ociMediaType = v1.MediaTypeImageManifest
 	artifactType = "application/vnd.example.sbom.v1"
 	cfg          = distribution.Descriptor{
 		MediaType: artifactType,

@@ -142,13 +143,14 @@ func TestEventBridgeRepoDeleted(t *testing.T) {
 }

 func createTestEnv(t *testing.T, fn testSinkFn) Listener {
-	mfst := schema2.Manifest{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: v1.MediaTypeImageManifest,
+	manifest := schema2.Manifest{
+		Versioned: manifest.Versioned{
+			MediaType: ociMediaType,
+		},
 		Config: cfg,
 	}

-	deserializedManifest, err := schema2.FromStruct(mfst)
+	deserializedManifest, err := schema2.FromStruct(manifest)
 	if err != nil {
 		t.Fatalf("creating OCI manifest: %v", err)
 	}
@@ -14,7 +14,6 @@ import (
 	"github.com/distribution/distribution/v3/testutil"
 	"github.com/distribution/reference"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 )

 func TestListener(t *testing.T) {

@@ -144,8 +143,7 @@ func checkTestRepository(t *testing.T, repository distribution.Repository, remov
 	}

 	m := schema2.Manifest{
-		Versioned: specs.Versioned{SchemaVersion: 2},
-		MediaType: schema2.MediaTypeManifest,
+		Versioned: schema2.SchemaVersion,
 		Config: distribution.Descriptor{
 			MediaType: "foo/bar",
 			Digest:    configDgst,
@@ -269,7 +269,7 @@ type RouteDescriptor struct {
 	// should match.
 	Path string

-	// Entity should be a short, human-readable description of the object
+	// Entity should be a short, human-readalbe description of the object
 	// targeted by the endpoint.
 	Entity string
@@ -202,7 +202,7 @@ func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string,
 	return appendValuesURL(uploadURL, values...).String(), nil
 }

-// cloneRoute returns a clone of the named route from the router. Routes
+// clondedRoute returns a clone of the named route from the router. Routes
 // must be cloned to avoid modifying them during url generation.
 func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
 	route := new(mux.Route)
@@ -46,7 +46,7 @@ var (
 )

 // InitFunc is the type of an AccessController factory function and is used
-// to register the constructor for different AccessController backends.
+// to register the constructor for different AccesController backends.
 type InitFunc func(options map[string]interface{}) (AccessController, error)

 var accessControllers map[string]InitFunc

@@ -56,7 +56,7 @@ func init() {
 }

 // UserInfo carries information about
-// an authenticated/authorized client.
+// an autenticated/authorized client.
 type UserInfo struct {
 	Name string
 }
@ -9,19 +9,18 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/distribution/distribution/v3/registry/auth"
|
"github.com/distribution/distribution/v3/registry/auth"
|
||||||
"github.com/go-jose/go-jose/v4"
|
"github.com/go-jose/go-jose/v3"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// init handles registering the token auth backend.
|
// init handles registering the token auth backend.
|
||||||
func init() {
|
func init() {
|
||||||
if err := auth.Register("token", auth.InitFunc(newAccessController)); err != nil {
|
if err := auth.Register("token", auth.InitFunc(newAccessController)); err != nil {
|
||||||
logrus.Errorf("failed to register token auth: %v", err)
|
logrus.Errorf("tailed to register token auth: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -87,7 +86,6 @@ type authChallenge struct {
|
||||||
err error
|
err error
|
||||||
realm string
|
realm string
|
||||||
autoRedirect bool
|
autoRedirect bool
|
||||||
autoRedirectPath string
|
|
||||||
service string
|
service string
|
||||||
accessSet accessSet
|
accessSet accessSet
|
||||||
}
|
}
|
||||||
@@ -104,28 +102,13 @@ func (ac authChallenge) Status() int {
 	return http.StatusUnauthorized
 }

-func buildAutoRedirectURL(r *http.Request, autoRedirectPath string) string {
-	scheme := "https"
-
-	if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
-		scheme = forwardedProto
-	}
-
-	u := &url.URL{
-		Scheme: scheme,
-		Host:   r.Host,
-		Path:   autoRedirectPath,
-	}
-	return u.String()
-}
-
 // challengeParams constructs the value to be used in
 // the WWW-Authenticate response challenge header.
 // See https://tools.ietf.org/html/rfc6750#section-3
 func (ac authChallenge) challengeParams(r *http.Request) string {
 	var realm string
 	if ac.autoRedirect {
-		realm = buildAutoRedirectURL(r, ac.autoRedirectPath)
+		realm = fmt.Sprintf("https://%s/auth/token", r.Host)
 	} else {
 		realm = ac.realm
 	}

@@ -144,7 +127,7 @@ func (ac authChallenge) challengeParams(r *http.Request) string {
 	return str
 }

-// SetHeaders sets the WWW-Authenticate value for the response.
+// SetChallenge sets the WWW-Authenticate value for the response.
 func (ac authChallenge) SetHeaders(r *http.Request, w http.ResponseWriter) {
 	w.Header().Add("WWW-Authenticate", ac.challengeParams(r))
 }

@@ -153,29 +136,21 @@ func (ac authChallenge) SetHeaders(r *http.Request, w http.ResponseWriter) {
 type accessController struct {
 	realm             string
 	autoRedirect      bool
-	autoRedirectPath  string
 	issuer            string
 	service           string
 	rootCerts         *x509.CertPool
 	trustedKeys       map[string]crypto.PublicKey
-	signingAlgorithms []jose.SignatureAlgorithm
 }

-const (
-	defaultAutoRedirectPath = "/auth/token"
-)
-
 // tokenAccessOptions is a convenience type for handling
-// options to the constructor of an accessController.
+// options to the contstructor of an accessController.
 type tokenAccessOptions struct {
 	realm             string
 	autoRedirect      bool
-	autoRedirectPath  string
 	issuer            string
 	service           string
 	rootCertBundle    string
 	jwks              string
-	signingAlgorithms []string
 }

 // checkOptions gathers the necessary options
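For readers following the challengeParams change earlier in this file: the string it builds ends up in a WWW-Authenticate Bearer challenge on 401 responses. A rough, stand-alone illustration of what such a header looks like (realm, service and scope values are placeholders, not taken from this changeset):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// writeChallenge approximates what authChallenge.SetHeaders attaches via
// challengeParams; the parameter values here are purely illustrative.
func writeChallenge(w http.ResponseWriter) {
	challenge := fmt.Sprintf(
		"Bearer realm=%q,service=%q,scope=%q",
		"https://auth.example.com/token", // token endpoint, or the auto-redirect URL
		"registry.example.com",
		"repository:foo/bar:pull",
	)
	w.Header().Add("WWW-Authenticate", challenge)
	w.WriteHeader(http.StatusUnauthorized)
}

func main() {
	rec := httptest.NewRecorder()
	writeChallenge(rec)
	fmt.Println(rec.Header().Get("WWW-Authenticate"))
}
```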
@@ -208,32 +183,10 @@ func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
 	if ok {
 		autoRedirect, ok := autoRedirectVal.(bool)
 		if !ok {
-			return opts, errors.New("token auth requires a valid option bool: autoredirect")
+			return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect")
 		}
 		opts.autoRedirect = autoRedirect
 	}
-	if opts.autoRedirect {
-		autoRedirectPathVal, ok := options["autoredirectpath"]
-		if ok {
-			autoRedirectPath, ok := autoRedirectPathVal.(string)
-			if !ok {
-				return opts, errors.New("token auth requires a valid option string: autoredirectpath")
-			}
-			opts.autoRedirectPath = autoRedirectPath
-		}
-		if opts.autoRedirectPath == "" {
-			opts.autoRedirectPath = defaultAutoRedirectPath
-		}
-	}
-
-	signingAlgos, ok := options["signingalgorithms"]
-	if ok {
-		signingAlgorithmsVals, ok := signingAlgos.([]string)
-		if !ok {
-			return opts, errors.New("signingalgorithms must be a list of signing algorithms")
-		}
-		opts.signingAlgorithms = signingAlgorithmsVals
-	}
-
 	return opts, nil
 }

@@ -290,18 +243,6 @@ func getJwks(path string) (*jose.JSONWebKeySet, error) {
 	return &jwks, nil
 }

-func getSigningAlgorithms(algos []string) ([]jose.SignatureAlgorithm, error) {
-	signAlgVals := make([]jose.SignatureAlgorithm, 0, len(algos))
-	for _, alg := range algos {
-		alg, ok := signingAlgorithms[alg]
-		if !ok {
-			return nil, fmt.Errorf("unsupported signing algorithm: %s", alg)
-		}
-		signAlgVals = append(signAlgVals, alg)
-	}
-	return signAlgVals, nil
-}
-
 // newAccessController creates an accessController using the given options.
 func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
 	config, err := checkOptions(options)
@@ -312,7 +253,6 @@ func newAccessController(options map[string]interface{}) (auth.AccessController,
 	var (
 		rootCerts []*x509.Certificate
 		jwks      *jose.JSONWebKeySet
-		signAlgos []jose.SignatureAlgorithm
 	)

 	if config.rootCertBundle != "" {

@@ -346,25 +286,13 @@ func newAccessController(options map[string]interface{}) (auth.AccessController,
 		}
 	}

-	signAlgos, err = getSigningAlgorithms(config.signingAlgorithms)
-	if err != nil {
-		return nil, err
-	}
-	if len(signAlgos) == 0 {
-		// NOTE: this is to maintain backwards compat
-		// with existing registry deployments
-		signAlgos = defaultSigningAlgorithms
-	}
-
 	return &accessController{
 		realm:             config.realm,
 		autoRedirect:      config.autoRedirect,
-		autoRedirectPath:  config.autoRedirectPath,
 		issuer:            config.issuer,
 		service:           config.service,
 		rootCerts:         rootPool,
 		trustedKeys:       trustedKeys,
-		signingAlgorithms: signAlgos,
 	}, nil
 }

@@ -374,7 +302,6 @@ func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Ac
 	challenge := &authChallenge{
 		realm:            ac.realm,
 		autoRedirect:     ac.autoRedirect,
-		autoRedirectPath: ac.autoRedirectPath,
 		service:          ac.service,
 		accessSet:        newAccessSet(accessItems...),
 	}

@@ -385,7 +312,7 @@ func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Ac
 		return nil, challenge
 	}

-	token, err := NewToken(rawToken, ac.signingAlgorithms)
+	token, err := NewToken(rawToken)
 	if err != nil {
 		challenge.err = err
 		return nil, challenge
@@ -1,89 +0,0 @@
-package token
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"testing"
-)
-
-func TestBuildAutoRedirectURL(t *testing.T) {
-	cases := []struct {
-		name             string
-		reqGetter        func() *http.Request
-		autoRedirectPath string
-		expectedURL      string
-	}{{
-		name: "http",
-		reqGetter: func() *http.Request {
-			req := httptest.NewRequest("GET", "http://example.com/", nil)
-			return req
-		},
-		autoRedirectPath: "/auth",
-		expectedURL:      "https://example.com/auth",
-	}, {
-		name: "x-forwarded",
-		reqGetter: func() *http.Request {
-			req := httptest.NewRequest("GET", "http://example.com/", nil)
-			req.Header.Set("X-Forwarded-Proto", "http")
-			return req
-		},
-		autoRedirectPath: "/auth/token",
-		expectedURL:      "http://example.com/auth/token",
-	}}
-
-	for _, tc := range cases {
-		t.Run(tc.name, func(t *testing.T) {
-			req := tc.reqGetter()
-			result := buildAutoRedirectURL(req, tc.autoRedirectPath)
-			if result != tc.expectedURL {
-				t.Errorf("expected %s, got %s", tc.expectedURL, result)
-			}
-		})
-	}
-}
-
-func TestCheckOptions(t *testing.T) {
-	realm := "https://auth.example.com/token/"
-	issuer := "test-issuer.example.com"
-	service := "test-service.example.com"
-
-	options := map[string]interface{}{
-		"realm":            realm,
-		"issuer":           issuer,
-		"service":          service,
-		"rootcertbundle":   "",
-		"autoredirect":     true,
-		"autoredirectpath": "/auth",
-	}
-
-	ta, err := checkOptions(options)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if ta.autoRedirect != true {
-		t.Fatal("autoredirect should be true")
-	}
-	if ta.autoRedirectPath != "/auth" {
-		t.Fatal("autoredirectpath should be /auth")
-	}
-
-	options = map[string]interface{}{
-		"realm":                        realm,
-		"issuer":                       issuer,
-		"service":                      service,
-		"rootcertbundle":               "",
-		"autoredirect":                 true,
-		"autoredirectforcetlsdisabled": true,
-	}
-
-	ta, err = checkOptions(options)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if ta.autoRedirect != true {
-		t.Fatal("autoredirect should be true")
-	}
-	if ta.autoRedirectPath != "/auth/token" {
-		t.Fatal("autoredirectpath should be /auth/token")
-	}
-}

@@ -4,7 +4,6 @@ import (
 	"testing"

 	fuzz "github.com/AdaLogics/go-fuzz-headers"
-	"github.com/go-jose/go-jose/v4"
 )

 func FuzzToken1(f *testing.F) {

@@ -19,7 +18,7 @@ func FuzzToken1(f *testing.F) {
 	if err != nil {
 		return
 	}
-	token, err := NewToken(rawToken, []jose.SignatureAlgorithm{jose.EdDSA, jose.RS384})
+	token, err := NewToken(rawToken)
 	if err != nil {
 		return
 	}

@@ -7,8 +7,8 @@ import (
 	"fmt"
 	"time"

-	"github.com/go-jose/go-jose/v4"
-	"github.com/go-jose/go-jose/v4/jwt"
+	"github.com/go-jose/go-jose/v3"
+	"github.com/go-jose/go-jose/v3/jwt"
 	log "github.com/sirupsen/logrus"

 	"github.com/distribution/distribution/v3/registry/auth"

@@ -23,38 +23,6 @@ const (
 	Leeway = 60 * time.Second
 )

-var signingAlgorithms = map[string]jose.SignatureAlgorithm{
-	"EdDSA": jose.EdDSA,
-	"HS256": jose.HS256,
-	"HS384": jose.HS384,
-	"HS512": jose.HS512,
-	"RS256": jose.RS256,
-	"RS384": jose.RS384,
-	"RS512": jose.RS512,
-	"ES256": jose.ES256,
-	"ES384": jose.ES384,
-	"ES512": jose.ES512,
-	"PS256": jose.PS256,
-	"PS384": jose.PS384,
-	"PS512": jose.PS512,
-}
-
-var defaultSigningAlgorithms = []jose.SignatureAlgorithm{
-	jose.EdDSA,
-	jose.HS256,
-	jose.HS384,
-	jose.HS512,
-	jose.RS256,
-	jose.RS384,
-	jose.RS512,
-	jose.ES256,
-	jose.ES384,
-	jose.ES512,
-	jose.PS256,
-	jose.PS384,
-	jose.PS512,
-}
-
 // Errors used by token parsing and verification.
 var (
 	ErrMalformedToken = errors.New("malformed token")

@@ -101,8 +69,8 @@ type VerifyOptions struct {

 // NewToken parses the given raw token string
 // and constructs an unverified JSON Web Token.
-func NewToken(rawToken string, signingAlgs []jose.SignatureAlgorithm) (*Token, error) {
-	token, err := jwt.ParseSigned(rawToken, signingAlgs)
+func NewToken(rawToken string) (*Token, error) {
+	token, err := jwt.ParseSigned(rawToken)
 	if err != nil {
 		return nil, ErrMalformedToken
 	}

@@ -172,13 +140,6 @@ func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey crypto.Pu
 	// verifying the first one in the list only at the moment.
 	header := t.JWT.Headers[0]

-	signingKey, err = verifyCertChain(header, verifyOpts.Roots)
-	// NOTE(milosgajdos): if the x5c header is missing
-	// the token may have been signed by a JWKS.
-	if err != nil && err != jose.ErrMissingX5cHeader {
-		return
-	}
-
 	switch {
 	case header.JSONWebKey != nil:
 		signingKey, err = verifyJWK(header, verifyOpts)

@@ -188,7 +149,7 @@ func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey crypto.Pu
 		err = fmt.Errorf("token signed by untrusted key with ID: %q", header.KeyID)
 		}
 	default:
-		err = ErrInvalidToken
+		signingKey, err = verifyCertChain(header, verifyOpts.Roots)
 	}

 	return

@@ -265,7 +226,7 @@ func getCertPubKey(chains [][]*x509.Certificate) crypto.PublicKey {

 	// NOTE: we dont have to verify that the public key in the leaf cert
 	// *is* the signing key: if it's not the signing then token claims
-	// verification with this key fails
+	// verifcation with this key fails
 	return cert.PublicKey.(crypto.PublicKey)
 }

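A side note on the go-jose v4 versus v3 differences visible in these token files: v4's jwt.ParseSigned requires the caller to state which signature algorithms are acceptable, and its builder uses Serialize where v3 used CompactSerialize, which is why NewToken threads a []jose.SignatureAlgorithm through on the v4 side. A minimal sketch against the v4 API, using an HS256 shared key purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-jose/go-jose/v4"
	"github.com/go-jose/go-jose/v4/jwt"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // illustrative shared secret

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	// Serialize replaces CompactSerialize in the v4 builder API.
	raw, err := jwt.Signed(signer).Claims(map[string]interface{}{"sub": "someone"}).Serialize()
	if err != nil {
		panic(err)
	}

	// v4 only parses tokens whose algorithm is on this explicit allow-list.
	tok, err := jwt.ParseSigned(raw, []jose.SignatureAlgorithm{jose.HS256})
	if err != nil {
		panic(err)
	}

	var claims map[string]interface{}
	if err := tok.Claims(key, &claims); err != nil {
		panic(err)
	}
	fmt.Println(claims["sub"])
}
```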
@@ -19,8 +19,8 @@ import (
 	"time"

 	"github.com/distribution/distribution/v3/registry/auth"
-	"github.com/go-jose/go-jose/v4"
-	"github.com/go-jose/go-jose/v4/jwt"
+	"github.com/go-jose/go-jose/v3"
+	"github.com/go-jose/go-jose/v3/jwt"
 )

 func makeRootKeys(numKeys int) ([]*ecdsa.PrivateKey, error) {

@@ -123,12 +123,12 @@ func makeTestToken(jwk *jose.JSONWebKey, issuer, audience string, access []*Reso
 		Access: access,
 	}

-	tokenString, err := jwt.Signed(signer).Claims(claimSet).Serialize()
+	tokenString, err := jwt.Signed(signer).Claims(claimSet).CompactSerialize()
 	if err != nil {
 		return nil, fmt.Errorf("unable to build token string: %v", err)
 	}

-	return NewToken(tokenString, []jose.SignatureAlgorithm{signingKey.Algorithm})
+	return NewToken(tokenString)
 }

 // NOTE(milosgajdos): certTemplateInfo type as well

@@ -21,6 +21,7 @@ import (

 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/configuration"
+	"github.com/distribution/distribution/v3/manifest"
 	"github.com/distribution/distribution/v3/manifest/manifestlist"
 	"github.com/distribution/distribution/v3/manifest/schema2"
 	"github.com/distribution/distribution/v3/registry/api/errcode"

@@ -32,7 +33,6 @@ import (
 	"github.com/distribution/reference"
 	"github.com/gorilla/handlers"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go"
 )

 var headerConfig = http.Header{
@ -1577,8 +1577,10 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
|
||||||
// --------------------------------
|
// --------------------------------
|
||||||
// Attempt to push manifest with missing config and missing layers
|
// Attempt to push manifest with missing config and missing layers
|
||||||
manifest := &schema2.Manifest{
|
manifest := &schema2.Manifest{
|
||||||
Versioned: specs.Versioned{SchemaVersion: 2},
|
Versioned: manifest.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
MediaType: schema2.MediaTypeManifest,
|
MediaType: schema2.MediaTypeManifest,
|
||||||
|
},
|
||||||
Config: distribution.Descriptor{
|
Config: distribution.Descriptor{
|
||||||
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
|
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
|
||||||
Size: 3253,
|
Size: 3253,
|
||||||
|
@ -1707,33 +1709,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
|
||||||
|
|
||||||
// ------------------
|
// ------------------
|
||||||
// Fetch by tag name
|
// Fetch by tag name
|
||||||
|
|
||||||
// HEAD requests should not contain a body
|
|
||||||
headReq, err := http.NewRequest(http.MethodHead, manifestURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Error constructing request: %s", err)
|
|
||||||
}
|
|
||||||
headResp, err := http.DefaultClient.Do(headReq)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error head manifest: %v", err)
|
|
||||||
}
|
|
||||||
defer headResp.Body.Close()
|
|
||||||
|
|
||||||
checkResponse(t, "head uploaded manifest", headResp, http.StatusOK)
|
|
||||||
checkHeaders(t, headResp, http.Header{
|
|
||||||
"Docker-Content-Digest": []string{dgst.String()},
|
|
||||||
"ETag": []string{fmt.Sprintf(`"%s"`, dgst)},
|
|
||||||
})
|
|
||||||
|
|
||||||
headBody, err := io.ReadAll(headResp.Body)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("reading body for head manifest: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(headBody) > 0 {
|
|
||||||
t.Fatalf("unexpected body length for head manifest: %d", len(headBody))
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, manifestURL, nil)
|
req, err := http.NewRequest(http.MethodGet, manifestURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Error constructing request: %s", err)
|
t.Fatalf("Error constructing request: %s", err)
|
||||||
|
@ -1769,32 +1744,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
|
||||||
|
|
||||||
// ---------------
|
// ---------------
|
||||||
// Fetch by digest
|
// Fetch by digest
|
||||||
|
|
||||||
// HEAD requests should not contain a body
|
|
||||||
headReq, err = http.NewRequest(http.MethodHead, manifestDigestURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Error constructing request: %s", err)
|
|
||||||
}
|
|
||||||
headResp, err = http.DefaultClient.Do(headReq)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error head manifest: %v", err)
|
|
||||||
}
|
|
||||||
defer headResp.Body.Close()
|
|
||||||
|
|
||||||
checkResponse(t, "head uploaded manifest by digest", headResp, http.StatusOK)
|
|
||||||
checkHeaders(t, headResp, http.Header{
|
|
||||||
"Docker-Content-Digest": []string{dgst.String()},
|
|
||||||
"ETag": []string{fmt.Sprintf(`"%s"`, dgst)},
|
|
||||||
})
|
|
||||||
|
|
||||||
headBody, err = io.ReadAll(headResp.Body)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("reading body for head manifest by digest: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(headBody) > 0 {
|
|
||||||
t.Fatalf("unexpected body length for head manifest: %d", len(headBody))
|
|
||||||
}
|
|
||||||
req, err = http.NewRequest(http.MethodGet, manifestDigestURL, nil)
|
req, err = http.NewRequest(http.MethodGet, manifestDigestURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Error constructing request: %s", err)
|
t.Fatalf("Error constructing request: %s", err)
|
||||||
|
@ -1898,8 +1847,10 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs)
|
||||||
// --------------------------------
|
// --------------------------------
|
||||||
// Attempt to push manifest list that refers to an unknown manifest
|
// Attempt to push manifest list that refers to an unknown manifest
|
||||||
manifestList := &manifestlist.ManifestList{
|
manifestList := &manifestlist.ManifestList{
|
||||||
Versioned: specs.Versioned{SchemaVersion: 2},
|
Versioned: manifest.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
MediaType: manifestlist.MediaTypeManifestList,
|
MediaType: manifestlist.MediaTypeManifestList,
|
||||||
|
},
|
||||||
Manifests: []manifestlist.ManifestDescriptor{
|
Manifests: []manifestlist.ManifestDescriptor{
|
||||||
{
|
{
|
||||||
Descriptor: distribution.Descriptor{
|
Descriptor: distribution.Descriptor{
|
||||||
|
@ -2510,7 +2461,7 @@ func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLB
|
||||||
|
|
||||||
func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) {
|
func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) {
|
||||||
if resp.StatusCode != expectedStatus {
|
if resp.StatusCode != expectedStatus {
|
||||||
t.Logf("unexpected status %s: expected %v, got %v", msg, resp.StatusCode, expectedStatus)
|
t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus)
|
||||||
maybeDumpResponse(t, resp)
|
maybeDumpResponse(t, resp)
|
||||||
t.FailNow()
|
t.FailNow()
|
||||||
}
|
}
|
||||||
|
@ -2592,8 +2543,6 @@ func maybeDumpResponse(t *testing.T, resp *http.Response) {
|
||||||
// test will fail. If a passed in header value is "*", any non-zero value will
|
// test will fail. If a passed in header value is "*", any non-zero value will
|
||||||
// suffice as a match.
|
// suffice as a match.
|
||||||
func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
|
func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
for k, vs := range headers {
|
for k, vs := range headers {
|
||||||
if resp.Header.Get(k) == "" {
|
if resp.Header.Get(k) == "" {
|
||||||
t.Fatalf("response missing header %q", k)
|
t.Fatalf("response missing header %q", k)
|
||||||
|
@ -2635,8 +2584,10 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string)
|
||||||
}
|
}
|
||||||
|
|
||||||
manifest := &schema2.Manifest{
|
manifest := &schema2.Manifest{
|
||||||
Versioned: specs.Versioned{SchemaVersion: 2},
|
Versioned: manifest.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
MediaType: schema2.MediaTypeManifest,
|
MediaType: schema2.MediaTypeManifest,
|
||||||
|
},
|
||||||
Config: distribution.Descriptor{
|
Config: distribution.Descriptor{
|
||||||
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
|
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
|
||||||
Size: 3253,
|
Size: 3253,
|
||||||
|
@ -2730,8 +2681,10 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
|
||||||
|
|
||||||
// Manifest upload
|
// Manifest upload
|
||||||
manifest := &schema2.Manifest{
|
manifest := &schema2.Manifest{
|
||||||
Versioned: specs.Versioned{SchemaVersion: 2},
|
Versioned: manifest.Versioned{
|
||||||
|
SchemaVersion: 2,
|
||||||
MediaType: schema2.MediaTypeManifest,
|
MediaType: schema2.MediaTypeManifest,
|
||||||
|
},
|
||||||
Config: distribution.Descriptor{
|
Config: distribution.Descriptor{
|
||||||
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
|
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
|
||||||
Size: 3253,
|
Size: 3253,
|
||||||
|
|
|
@@ -3,8 +3,6 @@ package handlers
 import (
 	"context"
 	"crypto/rand"
-	"crypto/tls"
-	"crypto/x509"
 	"expvar"
 	"fmt"
 	"math"

@@ -79,7 +77,7 @@ type App struct {
 		source notifications.SourceRecord
 	}

-	redis redis.UniversalClient
+	redis *redis.Client

 	// isCache is true if this registry is configured as a pull through cache
 	isCache bool

@@ -116,7 +114,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
 		storageParams = make(configuration.Parameters)
 	}
 	if storageParams["useragent"] == "" {
-		storageParams["useragent"] = fmt.Sprintf("distribution/%s %s", version.Version(), runtime.Version())
+		storageParams["useragent"] = fmt.Sprintf("distribution/%s %s", version.Version, runtime.Version())
 	}

 	var err error
@ -157,11 +155,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do not configure HTTP secret for a proxy registry as HTTP secret
|
|
||||||
// is only used for blob uploads and a proxy registry does not support blob uploads.
|
|
||||||
if !app.isCache {
|
|
||||||
app.configureSecret(config)
|
app.configureSecret(config)
|
||||||
}
|
|
||||||
app.configureEvents(config)
|
app.configureEvents(config)
|
||||||
app.configureRedis(config)
|
app.configureRedis(config)
|
||||||
app.configureLogHook(config)
|
app.configureLogHook(config)
|
||||||
|
@ -190,21 +184,6 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// configure tag lookup concurrency limit
|
|
||||||
if p := config.Storage.TagParameters(); p != nil {
|
|
||||||
l, ok := p["concurrencylimit"]
|
|
||||||
if ok {
|
|
||||||
limit, ok := l.(int)
|
|
||||||
if !ok {
|
|
||||||
panic("tag lookup concurrency limit config key must have a integer value")
|
|
||||||
}
|
|
||||||
if limit < 0 {
|
|
||||||
panic("tag lookup concurrency limit should be a non-negative integer value")
|
|
||||||
}
|
|
||||||
options = append(options, storage.TagLookupConcurrencyLimit(limit))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// configure redirects
|
// configure redirects
|
||||||
var redirectDisabled bool
|
var redirectDisabled bool
|
||||||
if redirectConfig, ok := config.Storage["redirect"]; ok {
|
if redirectConfig, ok := config.Storage["redirect"]; ok {
|
||||||
|
@ -257,21 +236,6 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
|
||||||
options = append(options, storage.ManifestURLsDenyRegexp(re))
|
options = append(options, storage.ManifestURLsDenyRegexp(re))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
switch config.Validation.Manifests.Indexes.Platforms {
|
|
||||||
case "list":
|
|
||||||
options = append(options, storage.EnableValidateImageIndexImagesExist)
|
|
||||||
for _, platform := range config.Validation.Manifests.Indexes.PlatformList {
|
|
||||||
options = append(options, storage.AddValidateImageIndexImagesExistPlatform(platform.Architecture, platform.OS))
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
case "none":
|
|
||||||
dcontext.GetLogger(app).Warn("Image index completeness validation has been disabled, which is an experimental option because other container tooling might expect all image indexes to be complete")
|
|
||||||
case "all":
|
|
||||||
fallthrough
|
|
||||||
default:
|
|
||||||
options = append(options, storage.EnableValidateImageIndexImagesExist)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// configure storage caches
|
// configure storage caches
|
||||||
|
@ -447,14 +411,6 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown close the underlying registry
|
|
||||||
func (app *App) Shutdown() error {
|
|
||||||
if r, ok := app.registry.(proxy.Closer); ok {
|
|
||||||
return r.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// register a handler with the application, by route name. The handler will be
|
// register a handler with the application, by route name. The handler will be
|
||||||
// passed through the application filters and context will be constructed at
|
// passed through the application filters and context will be constructed at
|
||||||
// request time.
|
// request time.
|
||||||
|
@@ -531,41 +487,12 @@ func (app *App) configureEvents(configuration *configuration.Configuration) {
 }

 func (app *App) configureRedis(cfg *configuration.Configuration) {
-	if len(cfg.Redis.Options.Addrs) == 0 {
+	if cfg.Redis.Addr == "" {
 		dcontext.GetLogger(app).Infof("redis not configured")
 		return
 	}

-	// redis TLS config
-	if cfg.Redis.TLS.Certificate != "" || cfg.Redis.TLS.Key != "" {
-		var err error
-		tlsConf := &tls.Config{}
-		tlsConf.Certificates = make([]tls.Certificate, 1)
-		tlsConf.Certificates[0], err = tls.LoadX509KeyPair(cfg.Redis.TLS.Certificate, cfg.Redis.TLS.Key)
-		if err != nil {
-			panic(err)
-		}
-		if len(cfg.Redis.TLS.ClientCAs) != 0 {
-			pool := x509.NewCertPool()
-			for _, ca := range cfg.Redis.TLS.ClientCAs {
-				caPem, err := os.ReadFile(ca)
-				if err != nil {
-					dcontext.GetLogger(app).Errorf("failed reading redis client CA: %v", err)
-					return
-				}
-
-				if ok := pool.AppendCertsFromPEM(caPem); !ok {
-					dcontext.GetLogger(app).Error("could not add CA to pool")
-					return
-				}
-			}
-			tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
-			tlsConf.ClientCAs = pool
-		}
-		cfg.Redis.Options.TLSConfig = tlsConf
-	}
-
-	app.redis = app.createPool(cfg.Redis.Options)
+	app.redis = app.createPool(cfg.Redis)

 	// Enable metrics instrumentation.
 	if err := redisotel.InstrumentMetrics(app.redis); err != nil {

@@ -587,12 +514,25 @@ func (app *App) configureRedis(cfg *configuration.Configuration) {
 	}))
 }

-func (app *App) createPool(cfg redis.UniversalOptions) redis.UniversalClient {
-	cfg.OnConnect = func(ctx context.Context, cn *redis.Conn) error {
+func (app *App) createPool(cfg configuration.Redis) *redis.Client {
+	return redis.NewClient(&redis.Options{
+		Addr: cfg.Addr,
+		OnConnect: func(ctx context.Context, cn *redis.Conn) error {
 			res := cn.Ping(ctx)
 			return res.Err()
-	}
-	return redis.NewUniversalClient(&cfg)
+		},
+		Username:        cfg.Username,
+		Password:        cfg.Password,
+		DB:              cfg.DB,
+		MaxRetries:      3,
+		DialTimeout:     cfg.DialTimeout,
+		ReadTimeout:     cfg.ReadTimeout,
+		WriteTimeout:    cfg.WriteTimeout,
+		PoolFIFO:        false,
+		MaxIdleConns:    cfg.Pool.MaxIdle,
+		PoolSize:        cfg.Pool.MaxActive,
+		ConnMaxIdleTime: cfg.Pool.IdleTimeout,
+	})
 }

 // configureLogHook prepares logging hook parameters.
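For context on the createPool change above: the newer side builds a redis.UniversalClient from redis.UniversalOptions, which covers single-node, failover and cluster deployments behind one interface and accepts an optional TLS config. A small stand-alone sketch with go-redis v9 (the address and TLS settings are placeholders, not registry defaults):

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	// One address yields a plain client; several yield a cluster or
	// failover client behind the same redis.UniversalClient interface.
	opts := &redis.UniversalOptions{
		Addrs:     []string{"localhost:6379"},
		TLSConfig: &tls.Config{MinVersion: tls.VersionTLS12}, // optional, mirrors the cfg.Redis.TLS handling above
	}

	client := redis.NewUniversalClient(opts)
	defer client.Close()

	if err := client.Ping(context.Background()).Err(); err != nil {
		fmt.Println("redis not reachable:", err)
	}
}
```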
@@ -6,7 +6,6 @@ import (
 	"mime"
 	"net/http"
 	"strings"
-	"sync"

 	"github.com/distribution/distribution/v3"
 	"github.com/distribution/distribution/v3/internal/dcontext"

@@ -14,13 +13,11 @@ import (
 	"github.com/distribution/distribution/v3/manifest/ocischema"
 	"github.com/distribution/distribution/v3/manifest/schema2"
 	"github.com/distribution/distribution/v3/registry/api/errcode"
-	"github.com/distribution/distribution/v3/registry/storage"
 	"github.com/distribution/distribution/v3/registry/storage/driver"
 	"github.com/distribution/reference"
 	"github.com/gorilla/handlers"
 	"github.com/opencontainers/go-digest"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"golang.org/x/sync/errgroup"
 )

 const (

@@ -215,11 +212,6 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request)
 	w.Header().Set("Content-Length", fmt.Sprint(len(p)))
 	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
 	w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
-
-	if r.Method == http.MethodHead {
-		return
-	}
-
 	if _, err := w.Write(p); err != nil {
 		w.WriteHeader(http.StatusInternalServerError)
 	}

@@ -484,26 +476,12 @@ func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Reques
 		return
 	}

-	var (
-		errs []error
-		mu   sync.Mutex
-	)
-	g := errgroup.Group{}
-	g.SetLimit(storage.DefaultConcurrencyLimit)
 	for _, tag := range referencedTags {
-		tag := tag
-
-		g.Go(func() error {
 		if err := tagService.Untag(imh, tag); err != nil {
-			mu.Lock()
-			errs = append(errs, err)
-			mu.Unlock()
+			imh.Errors = append(imh.Errors, err)
+			return
 		}
-			return nil
-		})
 	}
-	_ = g.Wait() // imh will record all errors, so ignore the error of Wait()
-	imh.Errors = errs

 	w.WriteHeader(http.StatusAccepted)
 }

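On the DeleteManifest change above: the newer side bounds the concurrent untag calls with errgroup.SetLimit and collects failures under a mutex rather than returning on the first error. A self-contained sketch of that pattern with a stubbed untag function (the limit of 2 and the tag names are arbitrary here):

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// untag stands in for tagService.Untag in the handler above.
func untag(tag string) error { return nil }

func main() {
	tags := []string{"latest", "v1", "v2", "v3"}

	var (
		mu   sync.Mutex
		errs []error
	)

	g := errgroup.Group{}
	g.SetLimit(2) // at most two untag operations in flight at once

	for _, tag := range tags {
		tag := tag
		g.Go(func() error {
			if err := untag(tag); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
			return nil
		})
	}
	_ = g.Wait() // errors are gathered in errs, so Wait's error is ignored

	fmt.Println("failures:", len(errs))
}
```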
@@ -17,23 +17,14 @@ type userpass struct {
 	password string
 }

-func (u userpass) Basic(_ *url.URL) (string, string) {
-	return u.username, u.password
-}
-
-func (u userpass) RefreshToken(_ *url.URL, service string) string {
-	return ""
-}
-
-func (u userpass) SetRefreshToken(_ *url.URL, service, token string) {
-}
-
 type credentials struct {
 	creds map[string]userpass
 }

 func (c credentials) Basic(u *url.URL) (string, string) {
-	return c.creds[u.String()].Basic(u)
+	up := c.creds[u.String()]
+
+	return up.username, up.password
 }

 func (c credentials) RefreshToken(u *url.URL, service string) string {

@@ -44,12 +35,12 @@ func (c credentials) SetRefreshToken(u *url.URL, service, token string) {
 }

 // configureAuth stores credentials for challenge responses
-func configureAuth(username, password, remoteURL string) (auth.CredentialStore, auth.CredentialStore, error) {
+func configureAuth(username, password, remoteURL string) (auth.CredentialStore, error) {
 	creds := map[string]userpass{}

 	authURLs, err := getAuthURLs(remoteURL)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}

 	for _, url := range authURLs {

@@ -60,7 +51,7 @@ func configureAuth(username, password, remoteURL string) (auth.CredentialStore,
 		}
 	}

-	return credentials{creds: creds}, userpass{username: username, password: password}, nil
+	return credentials{creds: creds}, nil
 }

 func getAuthURLs(remoteURL string) ([]string, error) {

@ -33,20 +33,22 @@ var inflight = make(map[digest.Digest]struct{})
|
||||||
// mu protects inflight
|
// mu protects inflight
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
|
|
||||||
func setResponseHeaders(h http.Header, length int64, mediaType string, digest digest.Digest) {
|
func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
|
||||||
h.Set("Content-Length", strconv.FormatInt(length, 10))
|
w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
|
||||||
h.Set("Content-Type", mediaType)
|
w.Header().Set("Content-Type", mediaType)
|
||||||
h.Set("Docker-Content-Digest", digest.String())
|
w.Header().Set("Docker-Content-Digest", digest.String())
|
||||||
h.Set("Etag", digest.String())
|
w.Header().Set("Etag", digest.String())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer, h http.Header) (distribution.Descriptor, error) {
|
func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) {
|
||||||
desc, err := pbs.remoteStore.Stat(ctx, dgst)
|
desc, err := pbs.remoteStore.Stat(ctx, dgst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return distribution.Descriptor{}, err
|
return distribution.Descriptor{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
setResponseHeaders(h, desc.Size, desc.MediaType, dgst)
|
if w, ok := writer.(http.ResponseWriter); ok {
|
||||||
|
setResponseHeaders(w, desc.Size, desc.MediaType, dgst)
|
||||||
|
}
|
||||||
|
|
||||||
remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
|
remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -100,7 +102,7 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter,
|
||||||
// Will return the blob from the remote store directly.
|
// Will return the blob from the remote store directly.
|
||||||
// TODO Maybe we could reuse the these blobs are serving remotely and caching locally.
|
// TODO Maybe we could reuse the these blobs are serving remotely and caching locally.
|
||||||
mu.Unlock()
|
mu.Unlock()
|
||||||
_, err := pbs.copyContent(ctx, dgst, w, w.Header())
|
_, err := pbs.copyContent(ctx, dgst, w)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
inflight[dgst] = struct{}{}
|
inflight[dgst] = struct{}{}
|
||||||
|
@ -120,7 +122,7 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter,
|
||||||
// Serving client and storing locally over same fetching request.
|
// Serving client and storing locally over same fetching request.
|
||||||
// This can prevent a redundant blob fetching.
|
// This can prevent a redundant blob fetching.
|
||||||
multiWriter := io.MultiWriter(w, bw)
|
multiWriter := io.MultiWriter(w, bw)
|
||||||
desc, err := pbs.copyContent(ctx, dgst, multiWriter, w.Header())
|
desc, err := pbs.copyContent(ctx, dgst, multiWriter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -448,22 +448,12 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := w.Result()
|
bodyBytes := w.Body.Bytes()
|
||||||
bodyBytes, err := io.ReadAll(resp.Body)
|
|
||||||
resp.Body.Close()
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf(err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
localDigest := digest.FromBytes(bodyBytes)
|
localDigest := digest.FromBytes(bodyBytes)
|
||||||
if localDigest != remoteBlob.Digest {
|
if localDigest != remoteBlob.Digest {
|
||||||
t.Errorf("Mismatching blob fetch from proxy")
|
t.Errorf("Mismatching blob fetch from proxy")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if resp.Header.Get("Docker-Content-Digest") != localDigest.String() {
|
|
||||||
t.Errorf("Mismatching digest in response header")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
desc, err := te.store.localStore.Stat(te.ctx, remoteBlob.Digest)
|
desc, err := te.store.localStore.Stat(te.ctx, remoteBlob.Digest)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -17,7 +17,6 @@ import (
|
||||||
"github.com/distribution/distribution/v3/testutil"
|
"github.com/distribution/distribution/v3/testutil"
|
||||||
"github.com/distribution/reference"
|
"github.com/distribution/reference"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/opencontainers/image-spec/specs-go"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type statsManifest struct {
|
type statsManifest struct {
|
||||||
|
@ -154,8 +153,7 @@ func populateRepo(ctx context.Context, t *testing.T, repository distribution.Rep
|
||||||
}
|
}
|
||||||
|
|
||||||
m := schema2.Manifest{
|
m := schema2.Manifest{
|
||||||
Versioned: specs.Versioned{SchemaVersion: 2},
|
Versioned: schema2.SchemaVersion,
|
||||||
MediaType: schema2.MediaTypeManifest,
|
|
||||||
Config: distribution.Descriptor{
|
Config: distribution.Descriptor{
|
||||||
MediaType: "foo/bar",
|
MediaType: "foo/bar",
|
||||||
Digest: configDigest,
|
Digest: configDigest,
|
||||||
|
|
|
@@ -62,16 +62,6 @@ func init() {
 	}))

 	metrics.Register(prometheus.ProxyNamespace)
-	initPrometheusMetrics("blob")
-	initPrometheusMetrics("manifest")
-}
-
-func initPrometheusMetrics(value string) {
-	requests.WithValues(value).Inc(0)
-	hits.WithValues(value).Inc(0)
-	misses.WithValues(value).Inc(0)
-	pulledBytes.WithValues(value).Inc(0)
-	pushedBytes.WithValues(value).Inc(0)
 }

 // BlobPull tracks metrics about blobs pulled into the cache

@ -8,8 +8,6 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/distribution/reference"
|
|
||||||
|
|
||||||
"github.com/distribution/distribution/v3"
|
"github.com/distribution/distribution/v3"
|
||||||
"github.com/distribution/distribution/v3/configuration"
|
"github.com/distribution/distribution/v3/configuration"
|
||||||
"github.com/distribution/distribution/v3/internal/client"
|
"github.com/distribution/distribution/v3/internal/client"
|
||||||
|
@ -20,6 +18,7 @@ import (
|
||||||
"github.com/distribution/distribution/v3/registry/proxy/scheduler"
|
"github.com/distribution/distribution/v3/registry/proxy/scheduler"
|
||||||
"github.com/distribution/distribution/v3/registry/storage"
|
"github.com/distribution/distribution/v3/registry/storage"
|
||||||
"github.com/distribution/distribution/v3/registry/storage/driver"
|
"github.com/distribution/distribution/v3/registry/storage/driver"
|
||||||
|
"github.com/distribution/reference"
|
||||||
)
|
)
|
||||||
|
|
||||||
var repositoryTTL = 24 * 7 * time.Hour
|
var repositoryTTL = 24 * 7 * time.Hour
|
||||||
|
@ -31,7 +30,6 @@ type proxyingRegistry struct {
|
||||||
ttl *time.Duration
|
ttl *time.Duration
|
||||||
remoteURL url.URL
|
remoteURL url.URL
|
||||||
authChallenger authChallenger
|
authChallenger authChallenger
|
||||||
basicAuth auth.CredentialStore
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRegistryPullThroughCache creates a registry acting as a pull through cache
|
// NewRegistryPullThroughCache creates a registry acting as a pull through cache
|
||||||
|
@ -114,7 +112,7 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
cs, b, err := configureAuth(config.Username, config.Password, config.RemoteURL)
|
cs, err := configureAuth(config.Username, config.Password, config.RemoteURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -129,7 +127,6 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name
|
||||||
cm: challenge.NewSimpleManager(),
|
cm: challenge.NewSimpleManager(),
|
||||||
cs: cs,
|
cs: cs,
|
||||||
},
|
},
|
||||||
basicAuth: b,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -158,8 +155,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named
|
||||||
|
|
||||||
tr := transport.NewTransport(http.DefaultTransport,
|
tr := transport.NewTransport(http.DefaultTransport,
|
||||||
auth.NewAuthorizer(c.challengeManager(),
|
auth.NewAuthorizer(c.challengeManager(),
|
||||||
auth.NewTokenHandlerWithOptions(tkopts),
|
auth.NewTokenHandlerWithOptions(tkopts)))
|
||||||
auth.NewBasicHandler(pr.basicAuth)))
|
|
||||||
|
|
||||||
localRepo, err := pr.embedded.Repository(ctx, name)
|
localRepo, err := pr.embedded.Repository(ctx, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -215,15 +211,6 @@ func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter {
|
||||||
return pr.embedded.BlobStatter()
|
return pr.embedded.BlobStatter()
|
||||||
}
|
}
|
||||||
|
|
||||||
type Closer interface {
|
|
||||||
// Close release all resources used by this object
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pr *proxyingRegistry) Close() error {
|
|
||||||
return pr.scheduler.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
// authChallenger encapsulates a request to the upstream to establish credential challenges
|
// authChallenger encapsulates a request to the upstream to establish credential challenges
|
||||||
type authChallenger interface {
|
type authChallenger interface {
|
||||||
tryEstablishChallenges(context.Context) error
|
tryEstablishChallenges(context.Context) error
|
||||||
@@ -206,13 +206,12 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.
 }

 // Stop stops the scheduler.
-func (ttles *TTLExpirationScheduler) Stop() error {
+func (ttles *TTLExpirationScheduler) Stop() {
 	ttles.Lock()
 	defer ttles.Unlock()

-	err := ttles.writeState()
-	if err != nil {
-		err = fmt.Errorf("error writing scheduler state: %w", err)
+	if err := ttles.writeState(); err != nil {
+		dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
 	}

 	for _, entry := range ttles.entries {

@@ -222,7 +221,6 @@ func (ttles *TTLExpirationScheduler) Stop() error {
 	close(ttles.doneChan)
 	ttles.saveTimer.Stop()
 	ttles.stopped = true
-	return err
 }

 func (ttles *TTLExpirationScheduler) writeState() error {

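On the Stop() signature change above: returning the writeState failure wrapped with %w lets callers inspect the underlying cause instead of relying on a log line. A minimal illustration of that wrap-and-check pattern, with writeState stubbed:

```go
package main

import (
	"errors"
	"fmt"
)

var errDiskFull = errors.New("disk full")

// writeState stands in for TTLExpirationScheduler.writeState above.
func writeState() error { return errDiskFull }

func stop() error {
	err := writeState()
	if err != nil {
		// %w keeps the original error reachable via errors.Is/As for callers.
		err = fmt.Errorf("error writing scheduler state: %w", err)
	}
	return err
}

func main() {
	if err := stop(); errors.Is(err, errDiskFull) {
		fmt.Println("shutdown failed:", err)
	}
}
```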
@@ -136,12 +136,7 @@ func TestRestoreOld(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Error starting ttlExpirationScheduler: %s", err)
 	}
-	defer func(s *TTLExpirationScheduler) {
-		err := s.Stop()
-		if err != nil {
-			t.Fatalf("Error stopping ttlExpirationScheduler: %s", err)
-		}
-	}(s)
+	defer s.Stop()

 	wg.Wait()
 	mu.Lock()

@@ -182,10 +177,7 @@ func TestStopRestore(t *testing.T) {

 	// Start and stop before all operations complete
 	// state will be written to fs
-	err = s.Stop()
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
+	s.Stop()
 	time.Sleep(10 * time.Millisecond)

 	// v2 will restore state from fs

@ -4,7 +4,6 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
@ -21,8 +20,6 @@ import (
|
||||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||||
"golang.org/x/crypto/acme"
|
"golang.org/x/crypto/acme"
|
||||||
"golang.org/x/crypto/acme/autocert"
|
"golang.org/x/crypto/acme/autocert"
|
||||||
"golang.org/x/net/http2"
|
|
||||||
"golang.org/x/net/http2/h2c"
|
|
||||||
|
|
||||||
"github.com/distribution/distribution/v3/configuration"
|
"github.com/distribution/distribution/v3/configuration"
|
||||||
"github.com/distribution/distribution/v3/health"
|
"github.com/distribution/distribution/v3/health"
|
||||||
|
@ -82,6 +79,9 @@ var tlsVersions = map[string]uint16{
|
||||||
// defaultLogFormatter is the default formatter to use for logs.
|
// defaultLogFormatter is the default formatter to use for logs.
|
||||||
const defaultLogFormatter = "text"
|
const defaultLogFormatter = "text"
|
||||||
|
|
||||||
|
// this channel gets notified when process receives signal. It is global to ease unit testing
|
||||||
|
var quit = make(chan os.Signal, 1)
|
||||||
|
|
||||||
// HandlerFunc defines an http middleware
|
// HandlerFunc defines an http middleware
|
||||||
type HandlerFunc func(config *configuration.Configuration, handler http.Handler) http.Handler
|
type HandlerFunc func(config *configuration.Configuration, handler http.Handler) http.Handler
|
||||||
|
|
||||||
|
@ -99,7 +99,7 @@ var ServeCmd = &cobra.Command{
|
||||||
Long: "`serve` stores and distributes Docker images.",
|
Long: "`serve` stores and distributes Docker images.",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
// setup context
|
// setup context
|
||||||
ctx := dcontext.WithVersion(dcontext.Background(), version.Version())
|
ctx := dcontext.WithVersion(dcontext.Background(), version.Version)
|
||||||
|
|
||||||
config, err := resolveConfiguration(args)
|
config, err := resolveConfiguration(args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -128,7 +128,6 @@ type Registry struct {
|
||||||
config *configuration.Configuration
|
config *configuration.Configuration
|
||||||
app *handlers.App
|
app *handlers.App
|
||||||
server *http.Server
|
server *http.Server
|
||||||
quit chan os.Signal
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRegistry creates a new registry from a context and configuration struct.
|
// NewRegistry creates a new registry from a context and configuration struct.
|
||||||
|
@ -159,9 +158,6 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during open telemetry initialization: %v", err)
|
return nil, fmt.Errorf("error during open telemetry initialization: %v", err)
|
||||||
}
|
}
|
||||||
if config.HTTP.H2C.Enabled {
|
|
||||||
handler = h2c.NewHandler(handler, &http2.Server{})
|
|
||||||
}
|
|
||||||
handler = otelHandler(handler)
|
handler = otelHandler(handler)
|
||||||
|
|
||||||
server := &http.Server{
|
server := &http.Server{
|
||||||
|
@ -172,7 +168,6 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg
|
||||||
app: app,
|
app: app,
|
||||||
config: config,
|
config: config,
|
||||||
server: server,
|
server: server,
|
||||||
quit: make(chan os.Signal, 1),
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -313,7 +308,7 @@ func (registry *Registry) ListenAndServe() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// setup channel to get notified on SIGTERM signal
|
// setup channel to get notified on SIGTERM signal
|
||||||
signal.Notify(registry.quit, os.Interrupt, syscall.SIGTERM)
|
signal.Notify(quit, syscall.SIGTERM)
|
||||||
serveErr := make(chan error)
|
serveErr := make(chan error)
|
||||||
|
|
||||||
// Start serving in goroutine and listen for stop signal in main thread
|
// Start serving in goroutine and listen for stop signal in main thread
|
||||||
|
@ -324,24 +319,15 @@ func (registry *Registry) ListenAndServe() error {
|
||||||
select {
|
select {
|
||||||
case err := <-serveErr:
|
case err := <-serveErr:
|
||||||
return err
|
return err
|
||||||
case <-registry.quit:
|
case <-quit:
|
||||||
dcontext.GetLogger(registry.app).Info("stopping server gracefully. Draining connections for ", config.HTTP.DrainTimeout)
|
dcontext.GetLogger(registry.app).Info("stopping server gracefully. Draining connections for ", config.HTTP.DrainTimeout)
|
||||||
// shutdown the server with a grace period of configured timeout
|
// shutdown the server with a grace period of configured timeout
|
||||||
c, cancel := context.WithTimeout(context.Background(), config.HTTP.DrainTimeout)
|
c, cancel := context.WithTimeout(context.Background(), config.HTTP.DrainTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
return registry.Shutdown(c)
|
return registry.server.Shutdown(c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown gracefully shuts down the registry's HTTP server and application object.
|
|
||||||
func (registry *Registry) Shutdown(ctx context.Context) error {
|
|
||||||
err := registry.server.Shutdown(ctx)
|
|
||||||
if appErr := registry.app.Shutdown(); appErr != nil {
|
|
||||||
err = errors.Join(err, appErr)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func configureDebugServer(config *configuration.Configuration) {
|
func configureDebugServer(config *configuration.Configuration) {
|
||||||
if config.HTTP.Debug.Addr != "" {
|
if config.HTTP.Debug.Addr != "" {
|
||||||
go func(addr string) {
|
go func(addr string) {
|
||||||
|
|
|
@ -103,7 +103,7 @@ func TestGracefulShutdown(t *testing.T) {
|
||||||
fmt.Fprintf(conn, "GET /v2/ ")
|
fmt.Fprintf(conn, "GET /v2/ ")
|
||||||
|
|
||||||
// send stop signal
|
// send stop signal
|
||||||
registry.quit <- os.Interrupt
|
quit <- os.Interrupt
|
||||||
time.Sleep(100 * time.Millisecond)
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
// try connecting again. it shouldn't
|
// try connecting again. it shouldn't
|
||||||
|
@ -325,7 +325,7 @@ func TestRegistrySupportedCipherSuite(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// send stop signal
|
// send stop signal
|
||||||
registry.quit <- os.Interrupt
|
quit <- os.Interrupt
|
||||||
time.Sleep(100 * time.Millisecond)
|
time.Sleep(100 * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -369,7 +369,7 @@ func TestRegistryUnsupportedCipherSuite(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// send stop signal
|
// send stop signal
|
||||||
registry.quit <- os.Interrupt
|
quit <- os.Interrupt
|
||||||
time.Sleep(100 * time.Millisecond)
|
time.Sleep(100 * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|