Merge pull request #3766 from thaJeztah/gofumpt

format code with gofumpt
Commit 52d948a9f5
Authored by Hayley Swimelar on 2022-11-04 12:19:53 +01:00; committed by GitHub
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
107 changed files with 404 additions and 483 deletions
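For context, gofumpt is a stricter formatter built on top of gofmt, and the changes below are mechanical style rewrites. The formatting would typically be applied with an invocation such as "gofumpt -w ." (assumed; the PR does not record the exact command). Below is a minimal, hypothetical Go file (not taken from this repository) sketching the main patterns that recur throughout the diff: a //go:build line paired with the legacy +build tag, a single grouped var block instead of repeated var statements, 0o-prefixed octal file modes, and no blank line between an assignment and its error check.

//go:build example
// +build example

// Package example is a hypothetical snippet illustrating the style this
// commit applies; none of the identifiers below come from the repository.
package example

import (
    "log"
    "os"
)

var (
    dryRun  bool
    verbose bool
)

// writeReport writes data to path with 0o600 permissions.
func writeReport(path string, data []byte) {
    f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600)
    if err != nil {
        log.Fatalf("open %s: %v", path, err)
    }
    defer f.Close()

    if _, err := f.Write(data); err != nil {
        log.Fatalf("write %s: %v", path, err)
    }
}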

View file

@@ -62,7 +62,6 @@ func main() {
 	if flag.NArg() > 0 {
 		for _, path := range flag.Args() {
 			fp, err := os.Open(path)
-
 			if err != nil {
 				log.Printf("%s: %v", path, err)
 				fail = true

View file

@@ -27,7 +27,6 @@ import (
 var spaceRegex = regexp.MustCompile(`\n\s*`)

 func main() {
-
 	if len(os.Args) != 2 {
 		log.Fatalln("please specify a template to execute.")
 	}
@@ -127,5 +126,4 @@ end:
 	}

 	return output
-
 }

View file

@@ -360,7 +360,6 @@ func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {
 	_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
 	c.Assert(err, NotNil)
 }
-

 // TestParseWithDifferentEnvReporting validates that environment variables

View file

@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz

 package configuration

View file

@@ -15,7 +15,7 @@
 // The above will store the version in the context and will be available to
 // the logger.
 //
-// Logging
+// # Logging
 //
 // The most useful aspect of this package is GetLogger. This function takes
 // any context.Context interface and returns the current logger from the
@@ -65,7 +65,7 @@
 // added to the request context, is unique to that context and can have
 // request scoped variables.
 //
-// HTTP Requests
+// # HTTP Requests
 //
 // This package also contains several methods for working with http requests.
 // The concepts are very similar to those described above. We simply place the

View file

@@ -20,9 +20,7 @@ import (
 	"github.com/sirupsen/logrus"
 )

-var (
-	enforceRepoClass bool
-)
+var enforceRepoClass bool

 func main() {
 	var (
@@ -110,7 +108,6 @@ func main() {
 	if err != nil {
 		logrus.Infof("Error serving: %v", err)
 	}
-
 }

 // handlerWithContext wraps the given context-aware handler by setting up the

View file

@@ -5,11 +5,10 @@ import (
 	"crypto/rsa"
 	"encoding/base64"
 	"errors"
+	"strings"
 	"testing"
 	"time"

-	"strings"
-
 	"github.com/distribution/distribution/v3/registry/auth"
 	"github.com/docker/libtrust"
 )
@@ -49,7 +48,6 @@ func TestCreateJWTSuccessWithEmptyACL(t *testing.T) {
 	if !strings.Contains(json, "test") {
 		t.Fatal("Valid token was not generated.")
 	}
-
 }

 func decodeJWT(rawToken string) (string, error) {

View file

@@ -187,7 +187,6 @@ func TestAll(t *testing.T) {
 			t.Fatalf("Missing element at position %d: %s", i, dgst)
 		}
 	}
-
 }

 func assertEqualShort(t *testing.T, actual, expected string) {
@@ -363,9 +362,11 @@ func BenchmarkLookup1000(b *testing.B) {
 func BenchmarkShortCode10(b *testing.B) {
 	benchShortCodeNTable(b, 10, 12)
 }
+
 func BenchmarkShortCode100(b *testing.B) {
 	benchShortCodeNTable(b, 100, 12)
 }
+
 func BenchmarkShortCode1000(b *testing.B) {
 	benchShortCodeNTable(b, 1000, 12)
 }

View file

@@ -7,9 +7,7 @@ import (
 	"github.com/distribution/distribution/v3/health"
 )

-var (
-	updater = health.NewStatusUpdater()
-)
+var updater = health.NewStatusUpdater()

 // DownHandler registers a manual_http_status that always returns an Error
 func DownHandler(w http.ResponseWriter, r *http.Request) {

View file

@@ -13,7 +13,7 @@
 // particularly useful for checks that verify upstream connectivity or
 // database status, since they might take a long time to return/timeout.
 //
-// Installing
+// # Installing
 //
 // To install health, just import it in your application:
 //
@@ -35,7 +35,7 @@
 // After importing these packages to your main application, you can start
 // registering checks.
 //
-// Registering Checks
+// # Registering Checks
 //
 // The recommended way of registering checks is using a periodic Check.
 // PeriodicChecks run on a certain schedule and asynchronously update the
@@ -84,7 +84,7 @@
 //	return Errors.new("This is an error!")
 // }))
 //
-// Examples
+// # Examples
 //
 // You could also use the health checker mechanism to ensure your application
 // only comes up if certain conditions are met, or to allow the developer to

View file

@@ -11,14 +11,12 @@ import (
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

-var (
-	// SchemaVersion provides a pre-initialized version structure for this
-	// packages version of the manifest.
-	SchemaVersion = manifest.Versioned{
-		SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
-		MediaType:     v1.MediaTypeImageManifest,
-	}
-)
+// SchemaVersion provides a pre-initialized version structure for this
+// packages version of the manifest.
+var SchemaVersion = manifest.Versioned{
+	SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
+	MediaType:     v1.MediaTypeImageManifest,
+}

 func init() {
 	ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {

View file

@@ -20,13 +20,11 @@ const (
 	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
 )

-var (
-	// SchemaVersion provides a pre-initialized version structure for this
-	// packages version of the manifest.
-	SchemaVersion = manifest.Versioned{
-		SchemaVersion: 1,
-	}
-)
+// SchemaVersion provides a pre-initialized version structure for this
+// packages version of the manifest.
+var SchemaVersion = manifest.Versioned{
+	SchemaVersion: 1,
+}

 func init() {
 	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@@ -149,7 +147,6 @@ func (sm SignedManifest) References() []distribution.Descriptor {
 	}
 	return dependencies
 }
-

 // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner

View file

@@ -42,7 +42,6 @@ func TestManifestUnmarshaling(t *testing.T) {
 	if !reflect.DeepEqual(&signed, env.signed) {
 		t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed)
 	}
-
 }

 func TestManifestVerification(t *testing.T) {

View file

@@ -65,7 +65,6 @@ func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable)
 	mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
 	mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
 	return nil
-
 }

 // References returns the current references added to this builder

View file

@@ -33,14 +33,12 @@ const (
 	MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
 )

-var (
-	// SchemaVersion provides a pre-initialized version structure for this
-	// packages version of the manifest.
-	SchemaVersion = manifest.Versioned{
-		SchemaVersion: 2,
-		MediaType:     MediaTypeManifest,
-	}
-)
+// SchemaVersion provides a pre-initialized version structure for this
+// packages version of the manifest.
+var SchemaVersion = manifest.Versioned{
+	SchemaVersion: 2,
+	MediaType:     MediaTypeManifest,
+}

 func init() {
 	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@@ -119,7 +117,6 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
 	if manifest.MediaType != MediaTypeManifest {
 		return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
 			MediaTypeManifest, manifest.MediaType)
 	}
-
 	m.Manifest = manifest

View file

@@ -233,7 +233,6 @@ func checkCommon(t *testing.T, event events.Event) {
 	if event.(Event).Target.Repository != repo {
 		t.Fatalf("unexpected repository: %q != %q", event.(Event).Target.Repository, repo)
 	}
-
 }

 type testSinkFn func(event events.Event) error

View file

@@ -143,9 +143,7 @@ type SourceRecord struct {
 	InstanceID string `json:"instanceID,omitempty"`
 }

-var (
-	// ErrSinkClosed is returned if a write is issued to a sink that has been
-	// closed. If encountered, the error should be considered terminal and
-	// retries will not be successful.
-	ErrSinkClosed = fmt.Errorf("sink: closed")
-)
+// ErrSinkClosed is returned if a write is issued to a sink that has been
+// closed. If encountered, the error should be considered terminal and
+// retries will not be successful.
+var ErrSinkClosed = fmt.Errorf("sink: closed")

View file

@@ -13,7 +13,7 @@ import (
 // envelope has changed. If this code fails, the revision of the protocol may
 // need to be incremented.
 func TestEventEnvelopeJSONFormat(t *testing.T) {
-	var expected = strings.TrimSpace(`
+	expected := strings.TrimSpace(`
 {
    "events": [
       {
@@ -114,7 +114,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 	prototype.Request.UserAgent = "test/0.1"
 	prototype.Source.Addr = "hostname.local:port"

-	var manifestPush = prototype
+	manifestPush := prototype
 	manifestPush.ID = "asdf-asdf-asdf-asdf-0"
 	manifestPush.Target.Digest = "sha256:0123456789abcdef0"
 	manifestPush.Target.Length = 1
@@ -123,7 +123,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 	manifestPush.Target.Repository = "library/test"
 	manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"

-	var layerPush0 = prototype
+	layerPush0 := prototype
 	layerPush0.ID = "asdf-asdf-asdf-asdf-1"
 	layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
 	layerPush0.Target.Length = 2
@@ -132,7 +132,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 	layerPush0.Target.Repository = "library/test"
 	layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"

-	var layerPush1 = prototype
+	layerPush1 := prototype
 	layerPush1.ID = "asdf-asdf-asdf-asdf-2"
 	layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
 	layerPush1.Target.Length = 3

View file

@@ -135,7 +135,7 @@ type headerRoundTripper struct {
 }

 func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
-	var nreq = *req
+	nreq := *req
 	nreq.Header = make(http.Header)

 	merge := func(headers http.Header) {

View file

@@ -197,7 +197,6 @@ func TestHTTPSink(t *testing.T) {
 	if err := sink.Close(); err == nil {
 		t.Fatalf("second close should have returned error: %v", err)
 	}
-
 }

 func createTestEvent(action, repo, typ string) Event {

View file

@@ -3,13 +3,12 @@ package notifications
 import (
 	"reflect"
 	"sync"
+	"testing"
 	"time"

 	events "github.com/docker/go-events"
 	"github.com/sirupsen/logrus"
-
-	"testing"
 )

 func TestEventQueue(t *testing.T) {

View file

@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz

 package reference

View file

@@ -525,7 +525,6 @@ func TestReferenceRegexp(t *testing.T) {
 	for i := range testcases {
 		checkRegexp(t, ReferenceRegexp, testcases[i])
 	}
-
 }

 func TestIdentifierRegexp(t *testing.T) {

View file

@@ -86,7 +86,6 @@ func TestErrorCodes(t *testing.T) {
 			t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
 		}
 	}
-
 }

 func TestErrorsManagement(t *testing.T) {
@@ -99,7 +98,6 @@ func TestErrorsManagement(t *testing.T) {
 	errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))

 	p, err := json.Marshal(errs)
-
 	if err != nil {
 		t.Fatalf("error marashaling errors: %v", err)
 	}
@@ -181,5 +179,4 @@ func TestErrorsManagement(t *testing.T) {
 	if e2.Detail != `stuff2` {
 		t.Fatalf("e2 had wrong detail: %q", e2.Detail)
 	}
-
 }

View file

@@ -75,8 +75,10 @@ var (
 	})
 )

-var nextCode = 1000
-var registerLock sync.Mutex
+var (
+	nextCode     = 1000
+	registerLock sync.Mutex
+)

 // Register will make the passed-in error known to the environment and
 // return a new ErrorCode

View file

@@ -262,7 +262,6 @@ type RouteDescriptor struct {
 // MethodDescriptor provides a description of the requests that may be
 // conducted with the target method.
 type MethodDescriptor struct {
-
 	// Method is an HTTP method, such as GET, PUT or POST.
 	Method string

View file

@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz

 package v2

View file

@@ -265,7 +265,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
 		u := server.URL + testcase.RequestURI
 		resp, err := http.Get(u)
-
 		if err != nil {
 			t.Fatalf("error issuing get request: %v", err)
 		}
@@ -316,7 +315,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
 		resp.Body.Close()
 	}
-
 }

 // -------------- START LICENSED CODE --------------

View file

@@ -29,7 +29,6 @@
 //			}
 //		}
 //	}
-//
 package auth

 import (

View file

@@ -128,10 +128,10 @@ func createHtpasswdFile(path string) error {
 		return err
 	}

-	if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
+	if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
 		return err
 	}
-	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
+	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600)
 	if err != nil {
 		return fmt.Errorf("failed to open htpasswd path %s", err)
 	}

View file

@@ -42,7 +42,7 @@ func TestBasicAccessController(t *testing.T) {
 	tempFile.Close()

-	var userNumber = 0
+	userNumber := 0

 	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		ctx := context.WithRequest(ctx, r)
@@ -76,7 +76,6 @@ func TestBasicAccessController(t *testing.T) {
 	req, _ := http.NewRequest(http.MethodGet, server.URL, nil)
 	resp, err := client.Do(req)
-
 	if err != nil {
 		t.Fatalf("unexpected error during GET: %v", err)
 	}
@@ -120,7 +119,6 @@ func TestBasicAccessController(t *testing.T) {
 			}
 		}
 	}
-
 }

 func TestCreateHtpasswdFile(t *testing.T) {

View file

@@ -8,7 +8,6 @@ import (
 )

 func TestParseHTPasswd(t *testing.T) {
-
 	for _, tc := range []struct {
 		desc  string
 		input string
@@ -81,5 +80,4 @@ asdf
 			t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
 		}
 	}
-
 }

View file

@@ -70,7 +70,6 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut
 	ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey))
-
 	return ctx, nil
 }

 type challenge struct {

View file

@@ -185,6 +185,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
 // VerifySigningKey attempts to get the key which was used to sign this token.
 // The token header should contain either of these 3 fields:
+//
 //	`x5c` - The x509 certificate chain for the signing key. Needs to be
 //	      verified.
 //	`jwk` - The JSON Web Key representation of the signing key.
@@ -192,6 +193,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
 //	`kid` - The unique identifier for the key. This library interprets it
 //	      as a libtrust fingerprint. The key itself can be looked up in
 //	      the trustedKeys field of the given verify options.
+//
 // Each of these methods are tried in that order of preference until the
 // signing key is found or an error is returned.
 func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {

View file

@@ -493,7 +493,7 @@ func TestNewAccessControllerPemBlock(t *testing.T) {
 	defer os.Remove(rootCertBundleFilename)

 	// Add something other than a certificate to the rootcertbundle
-	file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666)
+	file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0o666)
 	if err != nil {
 		t.Fatal(err)
 	}

View file

@@ -38,7 +38,6 @@ func TestAuthChallengeParse(t *testing.T) {
 	if expected := "he\"llo"; challenge.Parameters["slashed"] != expected {
 		t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected)
 	}
-
 }

 func TestAuthChallengeNormalization(t *testing.T) {
@@ -49,7 +48,6 @@ func TestAuthChallengeNormalization(t *testing.T) {
 }

 func testAuthChallengeNormalization(t *testing.T, host string) {
-
 	scm := NewSimpleManager()

 	url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
@@ -85,7 +83,6 @@ func testAuthChallengeNormalization(t *testing.T, host string) {
 }

 func testAuthChallengeConcurrent(t *testing.T, host string) {
-
 	scm := NewSimpleManager()

 	url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))

View file

@@ -50,7 +50,6 @@ func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Re
 func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) {
 	h := testutil.NewHandler(rrm)
-
 	wrapper := &testAuthenticationWrapper{
 		headers: http.Header(map[string][]string{
 			"X-API-Version":       {"registry/2.0"},
 			"X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"},

View file

@@ -296,7 +296,6 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
 	desc.Size = length
-
 	return desc, nil
 }

 // Get issues a HEAD request for a Manifest against its named endpoint in order
@@ -529,7 +528,6 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 	}
 	mt := resp.Header.Get("Content-Type")
 	body, err := ioutil.ReadAll(resp.Body)
-
 	if err != nil {
 		return nil, err
 	}
@@ -667,7 +665,6 @@ func sanitizeLocation(location, base string) (string, error) {
 func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
 	return bs.statter.Stat(ctx, dgst)
-
 }

 func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {

View file

@@ -319,7 +319,6 @@ func TestBlobDelete(t *testing.T) {
 	if err != nil {
 		t.Errorf("Error deleting blob: %s", err.Error())
 	}
-
 }

 func TestBlobFetch(t *testing.T) {
@@ -399,7 +398,6 @@ func TestBlobExistsNoContentLength(t *testing.T) {
 	if !strings.Contains(err.Error(), "missing content-length heade") {
 		t.Fatalf("Expected missing content-length error message")
 	}
-
 }

 func TestBlobExists(t *testing.T) {
@@ -986,7 +984,6 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b
 				"Content-Type": {schema1.MediaTypeSignedManifest},
 			}),
 		}
 	}
-
 	*m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag})
 }
@@ -1535,6 +1532,7 @@ func TestObtainsManifestForTagWithoutHeaders(t *testing.T) {
 		t.Fatalf("Unexpected digest")
 	}
 }
+
 func TestManifestTagsPaginated(t *testing.T) {
 	s := httptest.NewServer(http.NotFoundHandler())
 	defer s.Close()

View file

@@ -87,7 +87,8 @@ func TestCatalogAPI(t *testing.T) {
 	values := url.Values{
 		"last": []string{""},
-		"n":    []string{strconv.Itoa(chunkLen)}}
+		"n":    []string{strconv.Itoa(chunkLen)},
+	}

 	catalogURL, err := env.builder.BuildCatalogURL(values)
 	if err != nil {
@@ -453,7 +454,6 @@ func TestBlobAPI(t *testing.T) {
 	defer env2.Shutdown()
 	args = makeBlobArgs(t)
 	testBlobAPI(t, env2, args)
-
 }

 func TestBlobDelete(t *testing.T) {
@@ -1110,7 +1110,7 @@ const (
 func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
 	// Initialize the mock driver
-	var errGenericStorage = errors.New("generic storage error")
+	errGenericStorage := errors.New("generic storage error")
 	return &mockErrorDriver{
 		returnErrs: []mockErrorMapping{
 			{
@@ -1346,7 +1346,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
 	for i := range unsignedManifest.FSLayers {
 		rs, dgst, err := testutil.CreateRandomTarFile()
-
 		if err != nil {
 			t.Fatalf("error creating random layer %d: %v", i, err)
 		}
@@ -1450,7 +1449,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
 	sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk)
-
 	if err != nil {
 		t.Fatal(err)
 	}

 	// Re-push with a few different Content-Types. The official schema1
@@ -1684,7 +1682,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
 	for i := range manifest.Layers {
 		rs, dgst, err := testutil.CreateRandomTarFile()
-
 		if err != nil {
 			t.Fatalf("error creating random layer %d: %v", i, err)
 		}
@@ -2279,7 +2276,6 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) {
 	if len(tagsResponse.Tags) != 0 {
 		t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags)
 	}
-
 }

 type testEnv struct {
@@ -2308,7 +2304,6 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
 	config.Compatibility.Schema1.Enabled = true
 	return newTestEnvWithConfig(t, &config)
-
 }

 func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
@@ -2334,7 +2329,6 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te
 	app := NewApp(ctx, config)
 	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
 	builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false)
-
 	if err != nil {
 		t.Fatalf("error creating url builder: %v", err)
 	}
@@ -2832,7 +2826,6 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
 	blobURL, _ := env.builder.BuildBlobURL(ref)
 	resp, _ = httpDelete(blobURL)
 	checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
-
 }

 func TestProxyManifestGetByTag(t *testing.T) {

View file

@@ -703,7 +703,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 			return
 		}
 		repository, err := app.registry.Repository(context, nameRef)
-
 		if err != nil {
 			dcontext.GetLogger(context).Errorf("error resolving repository: %v", err)
@@ -983,7 +982,6 @@ func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespac
 		registry = rmw
 	}
 	return registry, nil
-
 }

 // applyRepoMiddleware wraps a repository with the configured middlewares

View file

@@ -120,13 +120,11 @@ func TestAppDispatcher(t *testing.T) {
 		app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
 		route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
 		u, err := route.URL(testcase.vars...)
-
 		if err != nil {
 			t.Fatal(err)
 		}

 		resp, err := http.Get(u.String())
-
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -275,5 +273,4 @@ func TestAppendAccessRecords(t *testing.T) {
 	if ok := reflect.DeepEqual(result, expectedResult); !ok {
 		t.Fatalf("Actual access record differs from expected")
 	}
-
 }

View file

@@ -79,7 +79,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req
 	blobs := buh.Repository.Blobs(buh)
 	upload, err := blobs.Create(buh, options...)
-
 	if err != nil {
 		if ebm, ok := err.(distribution.ErrBlobMounted); ok {
 			if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
@@ -219,7 +218,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
 		// really set the mediatype. For now, we can let the backend take care
 		// of this.
 	})
-
 	if err != nil {
 		switch err := err.(type) {
 		case distribution.ErrBlobInvalidDigest:

View file

@@ -34,7 +34,7 @@ type catalogAPIResponse struct {
 }

 func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
-	var moreEntries = true
+	moreEntries := true

 	q := r.URL.Query()
 	lastEntry := q.Get("last")

View file

@@ -31,7 +31,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
 func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error {
 	// Get a channel that tells us if the client disconnects
 	clientClosed := r.Context().Done()
-	var body = r.Body
+	body := r.Body
 	if limit > 0 {
 		body = http.MaxBytesReader(responseWriter, body, limit)
 	}

View file

@@ -479,7 +479,6 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest)
 	}

 	return nil
-
 }

 // DeleteManifest removes the manifest with the given digest or the tag with the given name from the registry.

View file

@@ -12,8 +12,10 @@ import (
 // used to register the constructor for different RegistryMiddleware backends.
 type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)

-var middlewares map[string]InitFunc
-var registryoptions []storage.RegistryOption
+var (
+	middlewares     map[string]InitFunc
+	registryoptions []storage.RegistryOption
+)

 // Register is used to register an InitFunc for
 // a RegistryMiddleware backend with the given name.

View file

@@ -221,6 +221,7 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) {
 	te.inRemote = inRemote
 	te.numUnique = numUnique
 }
+
 func TestProxyStoreGet(t *testing.T) {
 	te := makeTestEnv(t, "foo/bar")
@@ -253,7 +254,6 @@ func TestProxyStoreGet(t *testing.T) {
 	if (*remoteStats)["get"] != 1 {
 		t.Errorf("Unexpected remote get count")
 	}
-
 }

 func TestProxyStoreStat(t *testing.T) {
@@ -284,7 +284,6 @@ func TestProxyStoreStat(t *testing.T) {
 	if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) {
 		t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger)
 	}
-
 }

 func TestProxyStoreServeHighConcurrency(t *testing.T) {

View file

@@ -271,5 +271,4 @@ func TestProxyManifests(t *testing.T) {
 	if env.manifests.authChallenger.(*mockChallenger).count != 2 {
 		t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
 	}
-
 }

View file

@@ -70,5 +70,4 @@ func init() {
 	pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} {
 		return proxyMetrics.manifestMetrics
 	}))
-
 }

View file

@@ -69,7 +69,6 @@ func TestSchedule(t *testing.T) {
 		s.Lock()
 		s.add(ref3, 1*timeUnit, entryTypeBlob)
 		s.Unlock()
-
 	}()

 	// Ensure all repos are deleted
@@ -195,7 +194,6 @@ func TestStopRestore(t *testing.T) {
 	if len(remainingRepos) != 0 {
 		t.Fatalf("Repositories remaining: %#v", remainingRepos)
 	}
-
 }

 func TestDoubleStart(t *testing.T) {

View file

@@ -73,12 +73,14 @@ var defaultCipherSuites = []uint16{
 }

 // maps tls version strings to constants
-var defaultTLSVersionStr = "tls1.2"
-var tlsVersions = map[string]uint16{
-	// user specified values
-	"tls1.2": tls.VersionTLS12,
-	"tls1.3": tls.VersionTLS13,
-}
+var (
+	defaultTLSVersionStr = "tls1.2"
+	tlsVersions          = map[string]uint16{
+		// user specified values
+		"tls1.2": tls.VersionTLS12,
+		"tls1.3": tls.VersionTLS13,
+	}
+)

 // this channel gets notified when process receives signal. It is global to ease unit testing
 var quit = make(chan os.Signal, 1)
@@ -89,7 +91,6 @@ var ServeCmd = &cobra.Command{
 	Short: "`serve` stores and distributes Docker images",
 	Long:  "`serve` stores and distributes Docker images.",
 	Run: func(cmd *cobra.Command, args []string) {
-
 		// setup context
 		ctx := dcontext.WithVersion(dcontext.Background(), version.Version)

View file

@@ -152,7 +152,7 @@ func TestGetCipherSuite(t *testing.T) {
 		t.Error("did not return expected error about unknown cipher suite")
 	}

-	var insecureCipherSuites = []string{
+	insecureCipherSuites := []string{
 		"TLS_RSA_WITH_RC4_128_SHA",
 		"TLS_RSA_WITH_AES_128_CBC_SHA256",
 		"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
@@ -234,7 +234,7 @@ func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*regis
 	}

 	keyPath := path.Join(os.TempDir(), name+".key")
-	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
 		return nil, fmt.Errorf("failed to open %s for writing: %v", keyPath, err)
 	}

View file

@@ -36,8 +36,10 @@ var RootCmd = &cobra.Command{
 	},
 }

-var dryRun bool
-var removeUntagged bool
+var (
+	dryRun         bool
+	removeUntagged bool
+)

 // GCCmd is the cobra command that corresponds to the garbage-collect subcommand
 var GCCmd = &cobra.Command{

View file

@@ -36,7 +36,6 @@ func TestWriteSeek(t *testing.T) {
 	bs := repository.Blobs(ctx)

 	blobUpload, err := bs.Create(ctx)
-
 	if err != nil {
 		t.Fatalf("unexpected error starting layer upload: %s", err)
 	}
@@ -47,7 +46,6 @@ func TestWriteSeek(t *testing.T) {
 	if offset != int64(len(contents)) {
 		t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents))
 	}
-
 }

 // TestSimpleBlobUpload covers the blob upload process, exercising common
@@ -75,7 +73,6 @@ func TestSimpleBlobUpload(t *testing.T) {
 	rd := io.TeeReader(randomDataReader, h)

 	blobUpload, err := bs.Create(ctx)
-
 	if err != nil {
 		t.Fatalf("unexpected error starting layer upload: %s", err)
 	}
@@ -385,7 +382,6 @@ func TestBlobMount(t *testing.T) {
 	sbs := sourceRepository.Blobs(ctx)

 	blobUpload, err := sbs.Create(ctx)
-
 	if err != nil {
 		t.Fatalf("unexpected error starting layer upload: %s", err)
 	}

View file

@@ -121,7 +121,6 @@ func (bs *blobStore) path(dgst digest.Digest) (string, error) {
 	bp, err := pathFor(blobDataPathSpec{
 		digest: dgst,
 	})
-
 	if err != nil {
 		return "", err
 	}
@@ -165,7 +164,6 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 	path, err := pathFor(blobDataPathSpec{
 		digest: dgst,
 	})
-
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}

View file

@@ -15,9 +15,7 @@ import (
 	"github.com/sirupsen/logrus"
 )

-var (
-	errResumableDigestNotAvailable = errors.New("resumable digest not available")
-)
+var errResumableDigestNotAvailable = errors.New("resumable digest not available")

 const (
 	// digestSha256Empty is the canonical sha256 digest of empty data
@@ -296,7 +294,6 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 	blobPath, err := pathFor(blobDataPathSpec{
 		digest: desc.Digest,
 	})
-
 	if err != nil {
 		return err
 	}
@@ -355,7 +352,6 @@ func (bw *blobWriter) removeResources(ctx context.Context) error {
 		name: bw.blobStore.repository.Named().Name(),
 		id:   bw.id,
 	})
-
 	if err != nil {
 		return err
 	}

View file

@@ -85,7 +85,6 @@ func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntre
 		alg:  bw.digester.Digest().Algorithm(),
 		list: true,
 	})
-
 	if err != nil {
 		return nil, err
 	}
@@ -136,7 +135,6 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error {
 		alg:    bw.digester.Digest().Algorithm(),
 		offset: bw.written,
 	})
-
 	if err != nil {
 		return err
 	}

View file

@@ -39,14 +39,16 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T,
 	if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
 		Digest:    "sha384:abc",
 		Size:      10,
-		MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
+		MediaType: "application/octet-stream",
+	}); err != digest.ErrDigestInvalidFormat {
 		t.Fatalf("expected error with invalid digest: %v", err)
 	}

 	if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{
 		Digest:    "",
 		Size:      10,
-		MediaType: "application/octet-stream"}); err == nil {
+		MediaType: "application/octet-stream",
+	}); err == nil {
 		t.Fatalf("expected error setting value on invalid descriptor")
 	}
@@ -68,7 +70,8 @@ func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provi
 	expected := distribution.Descriptor{
 		Digest:    "sha256:abc1111111111111111111111111111111111111111111111111111111111111",
 		Size:      10,
-		MediaType: "application/octet-stream"}
+		MediaType: "application/octet-stream",
+	}

 	cache, err := provider.RepositoryScoped("foo/bar")
 	if err != nil {
@@ -152,7 +155,8 @@ func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider c
 	expected := distribution.Descriptor{
 		Digest:    "sha256:def1111111111111111111111111111111111111111111111111111111111111",
 		Size:      10,
-		MediaType: "application/octet-stream"}
+		MediaType: "application/octet-stream",
+	}

 	cache, err := provider.RepositoryScoped("foo/bar")
 	if err != nil {

View file

@@ -14,10 +14,8 @@ type cachedBlobStatter struct {
 	backend distribution.BlobDescriptorService
 }

-var (
-	// cacheCount is the number of total cache request received/hits/misses
-	cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
-)
+// cacheCount is the number of total cache request received/hits/misses
+var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")

 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.

View file

@@ -102,7 +102,6 @@ func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.N
 	if err != nil {
 		t.Fatalf("manifest upload failed: %v", err)
 	}
-
 }

 func TestCatalog(t *testing.T) {
@@ -289,8 +288,10 @@ func BenchmarkPathCompareNativeEqual(B *testing.B) {
 	}
 }

-var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
-var separatorChars = []byte("._-")
+var (
+	filenameChars  = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
+	separatorChars = []byte("._-")
+)

 func randomPath(length int64) string {
 	path := "/"

View file

@@ -93,7 +93,8 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) {
 	d := &driver{
 		client:    blobClient,
-		container: container}
+		container: container,
+	}

 	return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
 }
@@ -412,7 +413,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
 			Marker: marker,
 			Prefix: virtPath,
 		})
-
 		if err != nil {
 			return out, err
 		}

View file

@@ -48,10 +48,8 @@ import (
 	"github.com/docker/go-metrics"
 )

-var (
-	// storageAction is the metrics of blob related operations
-	storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")
-)
+// storageAction is the metrics of blob related operations
+var storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")

 func init() {
 	metrics.Register(prometheus.StorageNamespace)

View file

@@ -52,8 +52,10 @@ type FileInfoInternal struct {
 	FileInfoFields
 }

-var _ FileInfo = FileInfoInternal{}
-var _ FileInfo = &FileInfoInternal{}
+var (
+	_ FileInfo = FileInfoInternal{}
+	_ FileInfo = &FileInfoInternal{}
+)

 // Path provides the full path of the target of this file info.
 func (fi FileInfoInternal) Path() string {

View file

@@ -149,7 +149,7 @@ func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte
 // Reader retrieves an io.ReadCloser for the content stored at "path" with a
 // given byte offset.
 func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
-	file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
+	file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, storagedriver.PathNotFoundError{Path: path}
@@ -173,11 +173,11 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
 func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) {
 	fullPath := d.fullPath(subPath)
 	parentDir := path.Dir(fullPath)
-	if err := os.MkdirAll(parentDir, 0777); err != nil {
+	if err := os.MkdirAll(parentDir, 0o777); err != nil {
 		return nil, err
 	}

-	fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666)
+	fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666)
 	if err != nil {
 		return nil, err
 	}
@@ -260,7 +260,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
 		return storagedriver.PathNotFoundError{Path: sourcePath}
 	}

-	if err := os.MkdirAll(path.Dir(dest), 0777); err != nil {
+	if err := os.MkdirAll(path.Dir(dest), 0o777); err != nil {
 		return err
 	}

View file

@@ -34,7 +34,6 @@ func init() {
 }

 func TestFromParametersImpl(t *testing.T) {
-
 	tests := []struct {
 		params   map[string]interface{} // technically the yaml can contain anything
 		expected DriverParameters
@@ -109,5 +108,4 @@ func TestFromParametersImpl(t *testing.T) {
 			t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params)
 		}
 	}
-
 }

View file

@@ -445,7 +445,6 @@ func putContentsClose(wc *storage.Writer, contents []byte) error {
 // available for future calls to StorageDriver.GetContent and
 // StorageDriver.Reader.
 func (w *writer) Commit() error {
-
 	if err := w.checkClosed(); err != nil {
 		return err
 	}

View file

@@ -22,8 +22,10 @@ import (
 // Hook up gocheck into the "go test" runner.
 func Test(t *testing.T) { check.TestingT(t) }

-var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
-var skipGCS func() string
+var (
+	gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
+	skipGCS              func() string
+)

 func init() {
 	bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET")

View file

@@ -190,7 +190,6 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 	}

 	entries, err := found.(*dir).list(normalized)
-
 	if err != nil {
 		switch err {
 		case errNotExists:

View file

@@ -163,7 +163,6 @@ func (d *dir) mkdirs(p string) (*dir, error) {
 	components := strings.Split(relative, "/")
 	for _, component := range components {
 		d, err := dd.mkdir(component)
-
 		if err != nil {
 			// This should actually never happen, since there are no children.
 			return nil, err

View file

@@ -98,7 +98,6 @@ func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, optio
 // URLFor attempts to find a url which may be used to retrieve the file at the given path.
 func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-
 	if ac.StorageDriver.Name() != "oss" {
 		dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver")
 		return ac.StorageDriver.URLFor(ctx, path, options)
@@ -112,5 +111,5 @@ func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, opti
 // init registers the alicdn layerHandler backend.
 func init() {
-	storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCDNStorageMiddleware))
+	storagemiddleware.Register("alicdn", newAliCDNStorageMiddleware)
 }

View file

@ -1,6 +1,5 @@
// Package middleware - cloudfront wrapper for storage libs // Package middleware - cloudfront wrapper for storage libs
// N.B. currently only works with S3, not arbitrary sites // N.B. currently only works with S3, not arbitrary sites
//
package middleware package middleware
import ( import (
@ -34,12 +33,21 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}
// newCloudFrontLayerHandler constructs and returns a new CloudFront // newCloudFrontLayerHandler constructs and returns a new CloudFront
// LayerHandler implementation. // LayerHandler implementation.
// Required options: baseurl, privatekey, keypairid //
// Required options:
// Optional options: ipFilteredBy, awsregion //
// ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP, default value. "aws", only aws IP goes // - baseurl
// to S3 directly. "awsregion", only regions listed in awsregion options goes to S3 directly // - privatekey
// awsregion: a comma separated string of AWS regions. // - keypairid
//
// Optional options:
//
// - ipFilteredBy
// - awsregion
// - ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP,
// default value. "aws", only aws IP goes to S3 directly. "awsregion", only
// regions listed in awsregion options goes to S3 directly
// - awsregion: a comma separated string of AWS regions.
func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
// parse baseurl // parse baseurl
base, ok := options["baseurl"] base, ok := options["baseurl"]
@ -211,5 +219,5 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string,
// init registers the cloudfront layerHandler backend. // init registers the cloudfront layerHandler backend.
func init() { func init() {
storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware)
} }
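
A hypothetical sketch of wiring the documented options into the constructor above. The key names follow the doc comment (required baseurl, privatekey, keypairid; optional ipfilteredby and awsregion), while every value is a placeholder and the surrounding imports and backend driver are assumed rather than taken from this repository.

func wrapWithCloudFront(backend storagedriver.StorageDriver) (storagedriver.StorageDriver, error) {
	options := map[string]interface{}{
		// required
		"baseurl":    "https://d111111abcdef8.cloudfront.net/",
		"privatekey": "/etc/docker/cloudfront/pk-example.pem",
		"keypairid":  "EXAMPLEKEYPAIRID",
		// optional: only requests from the listed AWS regions go to S3 directly
		"ipfilteredby": "awsregion",
		"awsregion":    "us-east-1,eu-west-1",
	}
	return newCloudFrontStorageMiddleware(backend, options)
}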

View file

@ -21,11 +21,10 @@ func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
} }
func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) { func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) {
options := make(map[string]interface{}) options := make(map[string]interface{})
options["baseurl"] = "example.com" options["baseurl"] = "example.com"
var privk = `-----BEGIN RSA PRIVATE KEY----- privk := `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCy0ZZsItDuYoX3y6hWqyU9YdH/0B+tlOhvjlaJqvkmAIBBatVV MIICXQIBAAKBgQCy0ZZsItDuYoX3y6hWqyU9YdH/0B+tlOhvjlaJqvkmAIBBatVV
VAShnEAEircBwV3i08439WYgjXnrZ0FjXBTjTKWwCsbpuWJY1w8hqHW3VDivUo1n VAShnEAEircBwV3i08439WYgjXnrZ0FjXBTjTKWwCsbpuWJY1w8hqHW3VDivUo1n
F9WTeclVJuEMhmiAhek3dhUdATaEDqBNskXMofSgKmQHqhPdXCgDmnzKoQIDAQAB F9WTeclVJuEMhmiAhek3dhUdATaEDqBNskXMofSgKmQHqhPdXCgDmnzKoQIDAQAB

View file

@ -113,7 +113,6 @@ func (s *awsIPs) tryUpdate() error {
if regionAllowed { if regionAllowed {
*output = append(*output, *network) *output = append(*output, *network)
} }
} }
for _, prefix := range response.Prefixes { for _, prefix := range response.Prefixes {

View file

@ -35,7 +35,6 @@ func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
w.Write(bytes) w.Write(bytes)
} }
func newTestHandler(data awsIPResponse) *httptest.Server { func newTestHandler(data awsIPResponse) *httptest.Server {
@ -68,7 +67,6 @@ func TestS3TryUpdate(t *testing.T) {
assertEqual(t, 1, len(ips.ipv4)) assertEqual(t, 1, len(ips.ipv4))
assertEqual(t, 0, len(ips.ipv6)) assertEqual(t, 0, len(ips.ipv6))
} }
func TestMatchIPV6(t *testing.T) { func TestMatchIPV6(t *testing.T) {
@ -215,7 +213,7 @@ func TestInvalidNetworkType(t *testing.T) {
} }
func TestParsing(t *testing.T) { func TestParsing(t *testing.T) {
var data = `{ data := `{
"prefixes": [{ "prefixes": [{
"ip_prefix": "192.168.0.0", "ip_prefix": "192.168.0.0",
"region": "someregion", "region": "someregion",

View file

@ -46,5 +46,5 @@ func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, opt
} }
func init() { func init() {
storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) storagemiddleware.Register("redirect", newRedirectStorageMiddleware)
} }

View file

@ -37,8 +37,10 @@ const driverName = "oss"
// OSS API requires multipart upload chunks to be at least 5MB // OSS API requires multipart upload chunks to be at least 5MB
const minChunkSize = 5 << 20 const minChunkSize = 5 << 20
const defaultChunkSize = 2 * minChunkSize const (
const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk defaultChunkSize = 2 * minChunkSize
defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk
)
// listMax is the largest amount of objects you can request from OSS in a list call // listMax is the largest amount of objects you can request from OSS in a list call
const listMax = 1000 const listMax = 1000
@ -202,7 +204,6 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and // New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
// bucketName // bucketName
func New(params DriverParameters) (*Driver, error) { func New(params DriverParameters) (*Driver, error) {
client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
client.SetEndpoint(params.Endpoint) client.SetEndpoint(params.Endpoint)
bucket := client.Bucket(params.Bucket) bucket := client.Bucket(params.Bucket)
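
A quick standalone check of the chunk-size constants declared above: 5 << 20 is 5 MiB, matching the documented OSS multipart minimum, and the default chunk size is twice that.

package main

import "fmt"

func main() {
	const minChunkSize = 5 << 20              // 5 * 2^20 bytes = 5 MiB
	const defaultChunkSize = 2 * minChunkSize // 10 MiB
	fmt.Println(minChunkSize == 5*1024*1024)    // true
	fmt.Println(minChunkSize, defaultChunkSize) // 5242880 10485760
}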

View file

@ -24,15 +24,18 @@ var ossDriverConstructor func(rootDirectory string) (*Driver, error)
var skipCheck func() string var skipCheck func() string
func init() { func init() {
accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") var (
secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") accessKey = os.Getenv("ALIYUN_ACCESS_KEY_ID")
bucket := os.Getenv("OSS_BUCKET") secretKey = os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
region := os.Getenv("OSS_REGION") bucket = os.Getenv("OSS_BUCKET")
internal := os.Getenv("OSS_INTERNAL") region = os.Getenv("OSS_REGION")
encrypt := os.Getenv("OSS_ENCRYPT") internal = os.Getenv("OSS_INTERNAL")
secure := os.Getenv("OSS_SECURE") encrypt = os.Getenv("OSS_ENCRYPT")
endpoint := os.Getenv("OSS_ENDPOINT") secure = os.Getenv("OSS_SECURE")
encryptionKeyID := os.Getenv("OSS_ENCRYPTIONKEYID") endpoint = os.Getenv("OSS_ENDPOINT")
encryptionKeyID = os.Getenv("OSS_ENCRYPTIONKEYID")
)
root, err := ioutil.TempDir("", "driver-") root, err := ioutil.TempDir("", "driver-")
if err != nil { if err != nil {
panic(err) panic(err)

View file

@ -632,7 +632,6 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
Key: aws.String(d.s3Path(path)), Key: aws.String(d.s3Path(path)),
Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"),
}) })
if err != nil { if err != nil {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" {
return ioutil.NopCloser(bytes.NewReader(nil)), nil return ioutil.NopCloser(bytes.NewReader(nil)), nil
@ -1166,16 +1165,22 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre
// directoryDiff finds all directories that are not in common between // directoryDiff finds all directories that are not in common between
// the previous and current paths in sorted order. // the previous and current paths in sorted order.
// //
// Eg 1 directoryDiff("/path/to/folder", "/path/to/folder/folder/file") // # Examples
// => [ "/path/to/folder/folder" ], //
// Eg 2 directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file") // directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
// => [ "/path/to/folder/folder2" ] // // => [ "/path/to/folder/folder" ]
// Eg 3 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file") //
// => [ "/path/to/folder/folder2" ] // directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
// Eg 4 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file") // // => [ "/path/to/folder/folder2" ]
// => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ] //
// Eg 5 directoryDiff("/", "/path/to/folder/folder/file") // directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
// => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ], // // => [ "/path/to/folder/folder2" ]
//
// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
// // => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
//
// directoryDiff("/", "/path/to/folder/folder/file")
// // => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ]
func directoryDiff(prev, current string) []string { func directoryDiff(prev, current string) []string {
var paths []string var paths []string

View file

@ -27,27 +27,32 @@ import (
// Hook up gocheck into the "go test" runner. // Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) } func Test(t *testing.T) { check.TestingT(t) }
var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) var (
var skipS3 func() string s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error)
skipS3 func() string
)
func init() { func init() {
accessKey := os.Getenv("AWS_ACCESS_KEY") var (
secretKey := os.Getenv("AWS_SECRET_KEY") accessKey = os.Getenv("AWS_ACCESS_KEY")
bucket := os.Getenv("S3_BUCKET") secretKey = os.Getenv("AWS_SECRET_KEY")
encrypt := os.Getenv("S3_ENCRYPT") bucket = os.Getenv("S3_BUCKET")
keyID := os.Getenv("S3_KEY_ID") encrypt = os.Getenv("S3_ENCRYPT")
secure := os.Getenv("S3_SECURE") keyID = os.Getenv("S3_KEY_ID")
skipVerify := os.Getenv("S3_SKIP_VERIFY") secure = os.Getenv("S3_SECURE")
v4Auth := os.Getenv("S3_V4_AUTH") skipVerify = os.Getenv("S3_SKIP_VERIFY")
region := os.Getenv("AWS_REGION") v4Auth = os.Getenv("S3_V4_AUTH")
objectACL := os.Getenv("S3_OBJECT_ACL") region = os.Getenv("AWS_REGION")
objectACL = os.Getenv("S3_OBJECT_ACL")
regionEndpoint = os.Getenv("REGION_ENDPOINT")
forcePathStyle = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
sessionToken = os.Getenv("AWS_SESSION_TOKEN")
useDualStack = os.Getenv("S3_USE_DUALSTACK")
combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
accelerate = os.Getenv("S3_ACCELERATE")
)
root, err := ioutil.TempDir("", "driver-") root, err := ioutil.TempDir("", "driver-")
regionEndpoint := os.Getenv("REGION_ENDPOINT")
forcePathStyle := os.Getenv("AWS_S3_FORCE_PATH_STYLE")
sessionToken := os.Getenv("AWS_SESSION_TOKEN")
useDualStack := os.Getenv("S3_USE_DUALSTACK")
combineSmallPart := os.Getenv("MULTIPART_COMBINE_SMALL_PART")
accelerate := os.Getenv("S3_ACCELERATE")
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -343,7 +348,7 @@ func TestDelete(t *testing.T) {
return false return false
} }
var objs = []string{ objs := []string{
"/file1", "/file1",
"/file1-2", "/file1-2",
"/file1/2", "/file1/2",
@ -411,7 +416,7 @@ func TestDelete(t *testing.T) {
} }
// objects to skip auto-created test case // objects to skip auto-created test case
var skipCase = map[string]bool{ skipCase := map[string]bool{
// special case where deleting "/file1" also deletes "/file1/2" is tested explicitly // special case where deleting "/file1" also deletes "/file1/2" is tested explicitly
"/file1": true, "/file1": true,
} }
@ -536,7 +541,7 @@ func TestWalk(t *testing.T) {
t.Fatalf("unexpected error creating driver with standard storage: %v", err) t.Fatalf("unexpected error creating driver with standard storage: %v", err)
} }
var fileset = []string{ fileset := []string{
"/file1", "/file1",
"/folder1/file1", "/folder1/file1",
"/folder2/file1", "/folder2/file1",

View file

@ -24,29 +24,6 @@ var swiftDriverConstructor func(prefix string) (*Driver, error)
func init() { func init() {
var ( var (
username string
password string
authURL string
tenant string
tenantID string
domain string
domainID string
tenantDomain string
tenantDomainID string
trustID string
container string
region string
AuthVersion int
endpointType string
insecureSkipVerify bool
secretKey string
accessKey string
containerKey bool
tempURLMethods []string
swiftServer *swifttest.SwiftServer
err error
)
username = os.Getenv("SWIFT_USERNAME") username = os.Getenv("SWIFT_USERNAME")
password = os.Getenv("SWIFT_PASSWORD") password = os.Getenv("SWIFT_PASSWORD")
authURL = os.Getenv("SWIFT_AUTH_URL") authURL = os.Getenv("SWIFT_AUTH_URL")
@ -67,6 +44,10 @@ func init() {
containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY"))
tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",")
swiftServer *swifttest.SwiftServer
err error
)
if username == "" || password == "" || authURL == "" || container == "" { if username == "" || password == "" || authURL == "" || container == "" {
if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
panic(err) panic(err)

View file

@ -116,7 +116,8 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) {
"/a-.b", "/a-.b",
"/_.abc", "/_.abc",
"/Docker/docker-registry", "/Docker/docker-registry",
"/Abc/Cba"} "/Abc/Cba",
}
for _, filename := range validFiles { for _, filename := range validFiles {
err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
@ -154,7 +155,8 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
"abc", "abc",
"123.abc", "123.abc",
"//bcd", "//bcd",
"/abc_123/"} "/abc_123/",
}
for _, filename := range invalidFiles { for _, filename := range invalidFiles {
err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
@ -1175,8 +1177,10 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c
c.Assert(readContents, check.DeepEquals, contents) c.Assert(readContents, check.DeepEquals, contents)
} }
var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") var (
var separatorChars = []byte("._-") filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
separatorChars = []byte("._-")
)
func randomPath(length int64) string { func randomPath(length int64) string {
path := "/" path := "/"

View file

@ -16,6 +16,7 @@ type changingFileSystem struct {
func (cfs *changingFileSystem) List(_ context.Context, _ string) ([]string, error) { func (cfs *changingFileSystem) List(_ context.Context, _ string) ([]string, error) {
return cfs.fileset, nil return cfs.fileset, nil
} }
func (cfs *changingFileSystem) Stat(_ context.Context, path string) (FileInfo, error) { func (cfs *changingFileSystem) Stat(_ context.Context, path string) (FileInfo, error) {
kept, ok := cfs.keptFiles[path] kept, ok := cfs.keptFiles[path]
if ok && kept { if ok && kept {
@ -48,6 +49,7 @@ func (cfs *fileSystem) Stat(_ context.Context, path string) (FileInfo, error) {
}, },
}, nil }, nil
} }
func (cfs *fileSystem) isDir(path string) bool { func (cfs *fileSystem) isDir(path string) bool {
_, isDir := cfs.fileset[path] _, isDir := cfs.fileset[path]
return isDir return isDir
@ -167,7 +169,6 @@ func TestWalkFallback(t *testing.T) {
compareWalked(t, tc.expected, walked) compareWalked(t, tc.expected, walked)
}) })
} }
} }
func compareWalked(t *testing.T, expected, walked []string) { func compareWalked(t *testing.T, expected, walked []string) {

View file

@ -61,7 +61,6 @@ func TestFileReaderSeek(t *testing.T) {
} }
fr, err := newFileReader(ctx, driver, path, int64(len(content))) fr, err := newFileReader(ctx, driver, path, int64(len(content)))
if err != nil { if err != nil {
t.Fatalf("unexpected error creating file reader: %v", err) t.Fatalf("unexpected error creating file reader: %v", err)
} }

View file

@ -109,7 +109,6 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
return err return err
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to mark: %v", err) return fmt.Errorf("failed to mark: %v", err)
} }

View file

@ -173,7 +173,8 @@ func TestNoDeletionNoEffect(t *testing.T) {
// construct manifestlist for fun. // construct manifestlist for fun.
blobstatter := registry.BlobStatter() blobstatter := registry.BlobStatter()
manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{
image1.manifestDigest, image2.manifestDigest}) image1.manifestDigest, image2.manifestDigest,
})
if err != nil { if err != nil {
t.Fatalf("Failed to make manifest list: %v", err) t.Fatalf("Failed to make manifest list: %v", err)
} }

View file

@ -150,7 +150,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: uuid, id: uuid,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -159,7 +158,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: uuid, id: uuid,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -179,7 +177,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: id, id: id,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -203,7 +200,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: id, id: id,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -540,7 +540,6 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo
if payloadMediaType != v1.MediaTypeImageIndex { if payloadMediaType != v1.MediaTypeImageIndex {
t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType) t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType)
} }
} }
// TestLinkPathFuncs ensures that the link path functions behavior are locked // TestLinkPathFuncs ensures that the link path functions behavior are locked

View file

@ -24,24 +24,32 @@ const (
// The path layout in the storage backend is roughly as follows: // The path layout in the storage backend is roughly as follows:
// //
// <root>/v2 // <root>/v2
// -> repositories/ // ├── blob
// -><name>/ // │ └── <algorithm>
// -> _manifests/ // │ └── <split directory content addressable storage>
// revisions // └── repositories
// -> <manifest digest path> // └── <name>
// -> link // ├── _layers
// tags/<tag> // │ └── <layer links to blob store>
// -> current/link // ├── _manifests
// -> index // │ ├── revisions
// -> <algorithm>/<hex digest>/link // │ │ └── <manifest digest path>
// -> _layers/ // │ │ └── link
// <layer links to blob store> // │ └── tags
// -> _uploads/<id> // │ └── <tag>
// data // │ ├── current
// startedat // │ │ └── link
// hashstates/<algorithm>/<offset> // │ └── index
// -> blob/<algorithm> // │ └── <algorithm>
// <split directory content addressable storage> // │ └── <hex digest>
// │ └── link
// └── _uploads
// └── <id>
// ├── data
// ├── hashstates
// │ └── <algorithm>
// │ └── <offset>
// └── startedat
// //
// The storage backend layout is broken up into a content-addressable blob // The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data // store and repositories. The content-addressable blob store holds most data
@ -105,7 +113,6 @@ const (
// For more information on the semantic meaning of each path and their // For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation. // contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) { func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At // Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to // first glance, one may wonder why we don't use an interface to
// accomplish this. By keep the formatting separate from the pathSpec, we // accomplish this. By keep the formatting separate from the pathSpec, we
@ -135,7 +142,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec: case manifestRevisionLinkPathSpec:
root, err := pathFor(manifestRevisionPathSpec(v)) root, err := pathFor(manifestRevisionPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -147,7 +153,6 @@ func pathFor(spec pathSpec) (string, error) {
root, err := pathFor(manifestTagsPathSpec{ root, err := pathFor(manifestTagsPathSpec{
name: v.name, name: v.name,
}) })
if err != nil { if err != nil {
return "", err return "", err
} }
@ -155,7 +160,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, v.tag), nil return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec: case manifestTagCurrentPathSpec:
root, err := pathFor(manifestTagPathSpec(v)) root, err := pathFor(manifestTagPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -163,7 +167,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, "current", "link"), nil return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec: case manifestTagIndexPathSpec:
root, err := pathFor(manifestTagPathSpec(v)) root, err := pathFor(manifestTagPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -171,7 +174,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, "index"), nil return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec: case manifestTagIndexEntryLinkPathSpec:
root, err := pathFor(manifestTagIndexEntryPathSpec(v)) root, err := pathFor(manifestTagIndexEntryPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -182,7 +184,6 @@ func pathFor(spec pathSpec) (string, error) {
name: v.name, name: v.name,
tag: v.tag, tag: v.tag,
}) })
if err != nil { if err != nil {
return "", err return "", err
} }
@ -431,8 +432,7 @@ type uploadHashStatePathSpec struct {
func (uploadHashStatePathSpec) pathSpec() {} func (uploadHashStatePathSpec) pathSpec() {}
// repositoriesRootPathSpec returns the root of repositories // repositoriesRootPathSpec returns the root of repositories
type repositoriesRootPathSpec struct { type repositoriesRootPathSpec struct{}
}
func (repositoriesRootPathSpec) pathSpec() {} func (repositoriesRootPathSpec) pathSpec() {}
@ -445,7 +445,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
// groups of digest folder. It will be as follows: // groups of digest folder. It will be as follows:
// //
// <algorithm>/<first two bytes of digest>/<full digest> // <algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil { if err := dgst.Validate(); err != nil {
return nil, err return nil, err
@ -468,7 +467,6 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
// Reconstructs a digest from a path // Reconstructs a digest from a path
func digestFromPath(digestPath string) (digest.Digest, error) { func digestFromPath(digestPath string) (digest.Digest, error) {
digestPath = strings.TrimSuffix(digestPath, "/data") digestPath = strings.TrimSuffix(digestPath, "/data")
dir, hex := path.Split(digestPath) dir, hex := path.Split(digestPath)
dir = path.Dir(dir) dir = path.Dir(dir)

View file

@ -108,7 +108,6 @@ func TestPathMapper(t *testing.T) {
if err == nil { if err == nil {
t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
} }
} }
func TestDigestFromPath(t *testing.T) { func TestDigestFromPath(t *testing.T) {
@ -132,7 +131,6 @@ func TestDigestFromPath(t *testing.T) {
if result != testcase.expected { if result != testcase.expected {
t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected)
} }
} }
} }

View file

@ -98,7 +98,6 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
} else { } else {
errors = pushError(errors, filePath, err) errors = pushError(errors, filePath, err)
} }
} }
uploads[uuid] = ud uploads[uuid] = ud

View file

@ -38,7 +38,6 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa
if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
t.Fatalf("Unable to write startedAt file") t.Fatalf("Unable to write startedAt file")
} }
} }
func TestPurgeGather(t *testing.T) { func TestPurgeGather(t *testing.T) {

View file

@ -62,7 +62,6 @@ func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descr
name: ts.repository.Named().Name(), name: ts.repository.Named().Name(),
tag: tag, tag: tag,
}) })
if err != nil { if err != nil {
return err return err
} }
@ -84,7 +83,6 @@ func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descripto
name: ts.repository.Named().Name(), name: ts.repository.Named().Name(),
tag: tag, tag: tag,
}) })
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
@ -174,7 +172,7 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([
} }
func (ts *tagStore) ManifestDigests(ctx context.Context, tag string) ([]digest.Digest, error) { func (ts *tagStore) ManifestDigests(ctx context.Context, tag string) ([]digest.Digest, error) {
var tagLinkPath = func(name string, dgst digest.Digest) (string, error) { tagLinkPath := func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestTagIndexEntryLinkPathSpec{ return pathFor(manifestTagIndexEntryLinkPathSpec{
name: name, name: name,
tag: tag, tag: tag,

View file

@ -163,7 +163,6 @@ func TestTagStoreAll(t *testing.T) {
t.Errorf("unexpected tag in enumerate %s", removed) t.Errorf("unexpected tag in enumerate %s", removed)
} }
} }
} }
func TestTagLookup(t *testing.T) { func TestTagLookup(t *testing.T) {

View file

@ -18,6 +18,7 @@ var _ ManifestHandler = &v1UnsupportedHandler{}
func (v *v1UnsupportedHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { func (v *v1UnsupportedHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
return v.innerHandler.Unmarshal(ctx, dgst, content) return v.innerHandler.Unmarshal(ctx, dgst, content)
} }
func (v *v1UnsupportedHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { func (v *v1UnsupportedHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
return digest.Digest(""), distribution.ErrSchemaV1Unsupported return digest.Digest(""), distribution.ErrSchemaV1Unsupported
} }

View file

@ -23,7 +23,7 @@ func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) {
// Perturb this on each iteration of the loop below. // Perturb this on each iteration of the loop below.
header := &tar.Header{ header := &tar.Header{
Mode: 0644, Mode: 0o644,
ModTime: time.Now(), ModTime: time.Now(),
Typeflag: tar.TypeReg, Typeflag: tar.TypeReg,
Uname: "randocalrissian", Uname: "randocalrissian",
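
The 0644 to 0o644 change above is gofumpt's preference for the explicit 0o octal prefix; a minimal standalone check that the two spellings denote the same value:

package main

import "fmt"

func main() {
	fmt.Println(0644 == 0o644)           // true: same integer, different spelling
	fmt.Printf("%o %d\n", 0o644, 0o644)  // 644 420
}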

View file

@ -15,7 +15,6 @@ import (
// with version "v2.0" would print the following: // with version "v2.0" would print the following:
// //
// registry github.com/distribution/distribution v2.0 // registry github.com/distribution/distribution v2.0
//
func FprintVersion(w io.Writer) { func FprintVersion(w io.Writer) {
fmt.Fprintln(w, os.Args[0], Package, Version) fmt.Fprintln(w, os.Args[0], Package, Version)
} }
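
A hypothetical usage sketch of the function documented above; the import path is assumed to be the v3 module path, and the output follows the documented "<binary> <Package> <Version>" format.

package main

import (
	"os"

	"github.com/distribution/distribution/v3/version"
)

func main() {
	// Prints something like: registry github.com/distribution/distribution v2.0
	version.FprintVersion(os.Stdout)
}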