forked from TrueCloudLab/distribution
Enable static checks
Signed-off-by: Derek McGowan <derek@mcgstyle.net>
This commit is contained in: parent 32e2260be2, commit db0a4ec1c8
33 changed files with 116 additions and 96 deletions
@@ -5,6 +5,8 @@
   "EnableGC": true,
   "Enable": [
     "structcheck",
     "staticcheck",
     "unconvert",
+    "gofmt",
+    "golint",
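gometalinter reads this JSON config from the repository root; the hunk above adds gofmt and golint to the enabled linter set alongside the checks that were already on. A plausible reconstruction of the config after the change (the Vendor and Deadline fields are assumptions, not shown in the hunk):

    {
      "Vendor": true,
      "Deadline": "2m",
      "EnableGC": true,
      "Enable": [
        "structcheck",
        "staticcheck",
        "unconvert",
        "gofmt",
        "golint"
      ]
    }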
@@ -41,7 +41,7 @@ func TestLookup(t *testing.T) {
 	}
 	assertEqualDigests(t, dgst, digests[3])

-	dgst, err = dset.Lookup("1234")
+	_, err = dset.Lookup("1234")
 	if err == nil {
 		t.Fatal("Expected ambiguous error looking up: 1234")
 	}

@@ -49,7 +49,7 @@ func TestLookup(t *testing.T) {
 		t.Fatal(err)
 	}

-	dgst, err = dset.Lookup("9876")
+	_, err = dset.Lookup("9876")
 	if err == nil {
 		t.Fatal("Expected not found error looking up: 9876")
 	}

@@ -57,7 +57,7 @@ func TestLookup(t *testing.T) {
 		t.Fatal(err)
 	}

-	dgst, err = dset.Lookup("sha256:1234")
+	_, err = dset.Lookup("sha256:1234")
 	if err == nil {
 		t.Fatal("Expected ambiguous error looking up: sha256:1234")
 	}
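The pattern in these test hunks is the same throughout: the digest returned by Lookup was assigned to dgst but never read again, which staticcheck flags as an ineffectual assignment, so the value is discarded with the blank identifier. A minimal self-contained sketch of the fix (lookup is a hypothetical stand-in for dset.Lookup):

    package main

    import "errors"

    // lookup stands in for dset.Lookup above (hypothetical).
    func lookup(prefix string) (string, error) {
    	return "", errors.New("ambiguous prefix")
    }

    func main() {
    	// Before: dgst, err := lookup("1234"); dgst is unused, which is
    	// flagged as an ineffectual assignment. After: discard it explicitly.
    	_, err := lookup("1234")
    	if err == nil {
    		panic("expected ambiguous error")
    	}
    }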
@@ -215,7 +215,7 @@ func RegisterFunc(name string, check func() error) {
 // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
 // from an arbitrary func() error.
 func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) {
-	registry.Register(name, PeriodicChecker(CheckFunc(check), period))
+	registry.Register(name, PeriodicChecker(check, period))
 }

 // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker

@@ -227,7 +227,7 @@ func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) {
 // RegisterPeriodicThresholdFunc allows the convenience of registering a
 // PeriodicChecker from an arbitrary func() error.
 func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) {
-	registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold))
+	registry.Register(name, PeriodicThresholdChecker(check, period, threshold))
 }

 // RegisterPeriodicThresholdFunc allows the convenience of registering a
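Both hunks above remove a conversion that unconvert reports as a no-op: the check parameter is already declared as CheckFunc, so wrapping it in CheckFunc(check) converts a value to its own type. A minimal sketch of the pattern (types simplified from the health package):

    package main

    import "fmt"

    // CheckFunc mirrors the health package's function type (simplified).
    type CheckFunc func() error

    func register(name string, check CheckFunc) {
    	fmt.Println("registered", name)
    }

    func registerFunc(name string, check CheckFunc) {
    	// Before: register(name, CheckFunc(check)); the conversion is
    	// redundant because check already has type CheckFunc.
    	register(name, check)
    }

    func main() {
    	registerFunc("db", func() error { return nil })
    }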
@@ -59,7 +59,7 @@ func TestEmptyTar(t *testing.T) {
 	if err != nil {
 		t.Fatalf("NewReader returned error: %v", err)
 	}
-	n, err := gzipReader.Read(decompressed[:])
+	n, _ := gzipReader.Read(decompressed[:])
 	if n != 1024 {
 		t.Fatalf("read returned %d bytes; expected 1024", n)
 	}
@@ -56,7 +56,7 @@ func TestManifest(t *testing.T) {
 		t.Fatalf("error creating DeserializedManifest: %v", err)
 	}

-	mediaType, canonical, err := deserialized.Payload()
+	mediaType, canonical, _ := deserialized.Payload()

 	if mediaType != MediaTypeManifest {
 		t.Fatalf("unexpected media type: %s", mediaType)
@@ -31,7 +31,7 @@ func TestBroadcaster(t *testing.T) {
 		wg.Add(1)
 		go func(block ...Event) {
 			if err := b.Write(block...); err != nil {
-				t.Fatalf("error writing block of length %d: %v", len(block), err)
+				t.Errorf("error writing block of length %d: %v", len(block), err)
 			}
 			wg.Done()
 		}(block...)

@@ -41,6 +41,9 @@ func TestBroadcaster(t *testing.T) {
 	}

 	wg.Wait() // Wait until writes complete
+	if t.Failed() {
+		t.FailNow()
+	}
 	checkClose(t, b)

 	// Iterate through the sinks and check that they all have the expected length.

@@ -79,7 +82,7 @@ func TestEventQueue(t *testing.T) {
 		wg.Add(1)
 		go func(block ...Event) {
 			if err := eq.Write(block...); err != nil {
-				t.Fatalf("error writing event block: %v", err)
+				t.Errorf("error writing event block: %v", err)
 			}
 			wg.Done()
 		}(block...)

@@ -89,6 +92,9 @@ func TestEventQueue(t *testing.T) {
 	}

 	wg.Wait()
+	if t.Failed() {
+		t.FailNow()
+	}
 	checkClose(t, eq)

 	ts.mu.Lock()

@@ -177,7 +183,7 @@ func TestRetryingSink(t *testing.T) {
 		go func(block ...Event) {
 			defer wg.Done()
 			if err := s.Write(block...); err != nil {
-				t.Fatalf("error writing event block: %v", err)
+				t.Errorf("error writing event block: %v", err)
 			}
 		}(block...)

@@ -186,6 +192,9 @@ func TestRetryingSink(t *testing.T) {
 	}

 	wg.Wait()
+	if t.Failed() {
+		t.FailNow()
+	}
 	checkClose(t, s)

 	ts.mu.Lock()
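All six hunks above apply one rule: t.Fatal and t.Fatalf must only be called from the goroutine running the test, because they stop execution via runtime.Goexit and would only kill the writer goroutine. The fix records failures with t.Errorf and lets the test goroutine abort after wg.Wait() via t.Failed() and t.FailNow(). A minimal sketch of that pattern (write is a hypothetical stand-in for the sink under test):

    package demo

    import (
    	"sync"
    	"testing"
    )

    func write(n int) error { return nil } // stand-in for the sink under test

    func TestConcurrentWrites(t *testing.T) {
    	var wg sync.WaitGroup
    	for i := 0; i < 4; i++ {
    		wg.Add(1)
    		go func(n int) {
    			defer wg.Done()
    			if err := write(n); err != nil {
    				// t.Fatalf here would only exit this goroutine;
    				// record the failure and let the test goroutine decide.
    				t.Errorf("error writing block %d: %v", n, err)
    			}
    		}(i)
    	}
    	wg.Wait() // wait until all writes complete
    	if t.Failed() {
    		t.FailNow() // safe: back on the test goroutine
    	}
    }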
@@ -38,7 +38,7 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err
 		return auth.ErrAuthenticationFailure
 	}

-	err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
+	err := bcrypt.CompareHashAndPassword(credentials, []byte(password))
 	if err != nil {
 		return auth.ErrAuthenticationFailure
 	}
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"os"
 	"regexp"
 	"strconv"
 )

@@ -97,7 +96,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {

 	lastReaderOffset := hrs.readerOffset

-	if whence == os.SEEK_SET && hrs.rc == nil {
+	if whence == io.SeekStart && hrs.rc == nil {
 		// If no request has been made yet, and we are seeking to an
 		// absolute position, set the read offset as well to avoid an
 		// unnecessary request.

@@ -113,14 +112,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 	newOffset := hrs.seekOffset

 	switch whence {
-	case os.SEEK_CUR:
+	case io.SeekCurrent:
 		newOffset += offset
-	case os.SEEK_END:
+	case io.SeekEnd:
 		if hrs.size < 0 {
 			return 0, errors.New("content length not known")
 		}
 		newOffset = hrs.size + offset
-	case os.SEEK_SET:
+	case io.SeekStart:
 		newOffset = offset
 	}
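This commit systematically replaces the deprecated os.SEEK_SET, os.SEEK_CUR, and os.SEEK_END constants with io.SeekStart, io.SeekCurrent, and io.SeekEnd, the whence values Go 1.7 added to the io package. The numeric values are identical (0, 1, 2), so behavior is unchanged; only the os import goes away. For illustration:

    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    )

    func main() {
    	r := strings.NewReader("hello, world")
    	// io.SeekStart == 0, io.SeekCurrent == 1, io.SeekEnd == 2,
    	// the same values as the deprecated os.SEEK_* constants.
    	end, _ := r.Seek(0, io.SeekEnd) // distance to end = stream size
    	r.Seek(0, io.SeekStart)         // rewind to the beginning
    	fmt.Println("length:", end)
    }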
@@ -512,8 +512,8 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {

 	// ------------------------------------------
 	// Now, actually do successful upload.
-	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
-	layerFile.Seek(0, os.SEEK_SET)
+	layerLength, _ := layerFile.Seek(0, io.SeekEnd)
+	layerFile.Seek(0, io.SeekStart)

 	uploadURLBase, _ = startPushLayer(t, env, imageName)
 	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)

@@ -674,12 +674,12 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {

 	// ----------------
 	// Reupload previously deleted blob
-	layerFile.Seek(0, os.SEEK_SET)
+	layerFile.Seek(0, io.SeekStart)

 	uploadURLBase, _ := startPushLayer(t, env, imageName)
 	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)

-	layerFile.Seek(0, os.SEEK_SET)
+	layerFile.Seek(0, io.SeekStart)
 	canonicalDigester := digest.Canonical.Digester()
 	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
 		t.Fatalf("error copying to digest: %v", err)

@@ -693,7 +693,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
 		t.Fatalf("unexpected error checking head on existing layer: %v", err)
 	}

-	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
+	layerLength, _ := layerFile.Seek(0, io.SeekEnd)
 	checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK)
 	checkHeaders(t, resp, http.Header{
 		"Content-Length": []string{fmt.Sprint(layerLength)},
@@ -36,7 +36,7 @@ func (mail *mailer) sendMail(subject, message string) error {
 		auth,
 		mail.From,
 		mail.To,
-		[]byte(msg),
+		msg,
 	)
 	if err != nil {
 		return err
@@ -16,7 +16,7 @@ import (
 )

 // todo(richardscothern): from cache control header or config file
-const blobTTL = time.Duration(24 * 7 * time.Hour)
+const blobTTL = 24 * 7 * time.Hour

 type proxyBlobStore struct {
 	localStore distribution.BlobStore
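The dropped conversion above is exactly what unconvert looks for: time.Hour already has type time.Duration, and the untyped constants 24 and 7 adopt that type in the product, so time.Duration(...) converts the expression to the type it already has. A one-line illustration:

    package main

    import (
    	"fmt"
    	"time"
    )

    // blobTTL is a time.Duration with no explicit conversion: the untyped
    // constants 24 and 7 take on time.Hour's type in the multiplication.
    const blobTTL = 24 * 7 * time.Hour

    func main() {
    	fmt.Println(blobTTL) // 168h0m0s
    }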
@@ -350,24 +350,30 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
 			w := httptest.NewRecorder()
 			r, err := http.NewRequest("GET", "", nil)
 			if err != nil {
-				t.Fatal(err)
+				t.Error(err)
+				return
 			}

 			err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest)
 			if err != nil {
-				t.Fatalf(err.Error())
+				t.Errorf(err.Error())
+				return
 			}

 			bodyBytes := w.Body.Bytes()
 			localDigest := digest.FromBytes(bodyBytes)
 			if localDigest != remoteBlob.Digest {
-				t.Fatalf("Mismatching blob fetch from proxy")
+				t.Errorf("Mismatching blob fetch from proxy")
+				return
 			}
 		}
 	}()
 	}

 	wg.Wait()
+	if t.Failed() {
+		t.FailNow()
+	}

 	remoteBlobCount := len(te.inRemote)
 	sbsMu.Lock()

@@ -404,7 +410,6 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
 		}
 	}

-	localStats = te.LocalStats()
 	remoteStats = te.RemoteStats()

 	// Ensure remote unchanged
@@ -12,7 +12,7 @@ import (
 )

 // todo(richardscothern): from cache control header or config
-const repositoryTTL = time.Duration(24 * 7 * time.Hour)
+const repositoryTTL = 24 * 7 * time.Hour

 type proxyManifestStore struct {
 	ctx context.Context
@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"os"
 	"path"
 	"reflect"
 	"testing"

@@ -96,7 +95,7 @@ func TestSimpleBlobUpload(t *testing.T) {
 	}

 	// Do a resume, get unknown upload
-	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
+	_, err = bs.Resume(ctx, blobUpload.ID())
 	if err != distribution.ErrBlobUploadUnknown {
 		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
 	}

@@ -278,7 +277,7 @@ func TestSimpleBlobRead(t *testing.T) {
 		t.Fatalf("expected not found error when testing for existence: %v", err)
 	}

-	rc, err := bs.Open(ctx, dgst)
+	_, err = bs.Open(ctx, dgst)
 	if err != distribution.ErrBlobUnknown {
 		t.Fatalf("expected not found error when opening non-existent blob: %v", err)
 	}

@@ -300,7 +299,7 @@ func TestSimpleBlobRead(t *testing.T) {
 		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
 	}

-	rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
+	rc, err := bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
 	if err != nil {
 		t.Fatalf("error opening blob with %v: %v", dgst, err)
 	}

@@ -323,7 +322,7 @@ func TestSimpleBlobRead(t *testing.T) {
 	}

 	// Now seek back the blob, read the whole thing and check against randomLayerData
-	offset, err := rc.Seek(0, os.SEEK_SET)
+	offset, err := rc.Seek(0, io.SeekStart)
 	if err != nil {
 		t.Fatalf("error seeking blob: %v", err)
 	}

@@ -342,7 +341,7 @@ func TestSimpleBlobRead(t *testing.T) {
 	}

 	// Reset the randomLayerReader and read back the buffer
-	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
+	_, err = randomLayerReader.Seek(0, io.SeekStart)
 	if err != nil {
 		t.Fatalf("error resetting layer reader: %v", err)
 	}

@@ -397,7 +396,7 @@ func TestBlobMount(t *testing.T) {
 		t.Fatalf("error getting seeker size of random data: %v", err)
 	}

-	nn, err := io.Copy(blobUpload, randomDataReader)
+	_, err = io.Copy(blobUpload, randomDataReader)
 	if err != nil {
 		t.Fatalf("unexpected error uploading layer data: %v", err)
 	}

@@ -460,7 +459,7 @@ func TestBlobMount(t *testing.T) {
 	defer rc.Close()

 	h := sha256.New()
-	nn, err = io.Copy(h, rc)
+	nn, err := io.Copy(h, rc)
 	if err != nil {
 		t.Fatalf("error reading layer: %v", err)
 	}

@@ -573,17 +572,17 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec
 // the original state, returning the size. The state of the seeker should be
 // treated as unknown if an error is returned.
 func seekerSize(seeker io.ReadSeeker) (int64, error) {
-	current, err := seeker.Seek(0, os.SEEK_CUR)
+	current, err := seeker.Seek(0, io.SeekCurrent)
 	if err != nil {
 		return 0, err
 	}

-	end, err := seeker.Seek(0, os.SEEK_END)
+	end, err := seeker.Seek(0, io.SeekEnd)
 	if err != nil {
 		return 0, err
 	}

-	resumed, err := seeker.Seek(current, os.SEEK_SET)
+	resumed, err := seeker.Seek(current, io.SeekStart)
 	if err != nil {
 		return 0, err
 	}
@@ -29,7 +29,7 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error {
 		return errResumableDigestNotAvailable
 	}
 	offset := bw.fileWriter.Size()
-	if offset == int64(h.Len()) {
+	if offset == h.Len() {
 		// State of digester is already at the requested offset.
 		return nil
 	}

@@ -65,7 +65,7 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error {
 	}

 	// Mind the gap.
-	if gapLen := offset - int64(h.Len()); gapLen > 0 {
+	if gapLen := offset - h.Len(); gapLen > 0 {
 		return errResumableDigestNotAvailable
 	}

@@ -129,7 +129,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error {
 		name:   bw.blobStore.repository.Named().String(),
 		id:     bw.id,
 		alg:    bw.digester.Digest().Algorithm(),
-		offset: int64(h.Len()),
+		offset: h.Len(),
 	})

 	if err != nil {
registry/storage/cache/cachecheck/suite.go (vendored)
@@ -26,12 +26,12 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T,
 		t.Fatalf("expected unknown blob error with empty store: %v", err)
 	}

-	cache, err := provider.RepositoryScoped("")
+	_, err := provider.RepositoryScoped("")
 	if err == nil {
 		t.Fatalf("expected an error when asking for invalid repo")
 	}

-	cache, err = provider.RepositoryScoped("foo/bar")
+	cache, err := provider.RepositoryScoped("foo/bar")
 	if err != nil {
 		t.Fatalf("unexpected error getting repository: %v", err)
 	}
@@ -273,7 +273,7 @@ func BenchmarkPathCompareNative(B *testing.B) {

 	for i := 0; i < B.N; i++ {
 		c := a < b
-		c = c && false
+		_ = c && false
 	}
 }

@@ -285,7 +285,7 @@ func BenchmarkPathCompareNativeEqual(B *testing.B) {

 	for i := 0; i < B.N; i++ {
 		c := a < b
-		c = c && false
+		_ = c && false
 	}
 }
@@ -169,7 +169,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
 		return nil, err
 	}
 	info := blobRef.Properties
-	size := int64(info.ContentLength)
+	size := info.ContentLength
 	if offset >= size {
 		return ioutil.NopCloser(bytes.NewReader(nil)), nil
 	}

@@ -238,7 +238,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,

 	return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
 		Path:    path,
-		Size:    int64(blobProperties.ContentLength),
+		Size:    blobProperties.ContentLength,
 		ModTime: time.Time(blobProperties.LastModified),
 		IsDir:   false,
 	}}, nil
@@ -184,11 +184,11 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
 		return nil, err
 	}

-	seekPos, err := file.Seek(int64(offset), os.SEEK_SET)
+	seekPos, err := file.Seek(offset, io.SeekStart)
 	if err != nil {
 		file.Close()
 		return nil, err
-	} else if seekPos < int64(offset) {
+	} else if seekPos < offset {
 		file.Close()
 		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
 	}

@@ -217,12 +217,12 @@ func (d *driver) Writer(ctx context.Context, subPath string, append bool) (stora
 			return nil, err
 		}
 	} else {
-		n, err := fp.Seek(0, os.SEEK_END)
+		n, err := fp.Seek(0, io.SeekEnd)
 		if err != nil {
 			fp.Close()
 			return nil, err
 		}
-		offset = int64(n)
+		offset = n
 	}

 	return newFileWriter(fp, offset), nil
@@ -86,7 +86,7 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
 		return nil, fmt.Errorf("failed to read privatekey file: %s", err)
 	}

-	block, _ := pem.Decode([]byte(pkBytes))
+	block, _ := pem.Decode(pkBytes)
 	if block == nil {
 		return nil, fmt.Errorf("failed to decode private key as an rsa private key")
 	}
@@ -1,6 +1,7 @@
 package middleware

 import (
+	"context"
 	"testing"

 	check "gopkg.in/check.v1"

@@ -36,7 +37,7 @@ func (s *MiddlewareSuite) TestHttpsPort(c *check.C) {
 	c.Assert(m.scheme, check.Equals, "https")
 	c.Assert(m.host, check.Equals, "example.com:5443")

-	url, err := middleware.URLFor(nil, "/rick/data", nil)
+	url, err := middleware.URLFor(context.TODO(), "/rick/data", nil)
 	c.Assert(err, check.Equals, nil)
 	c.Assert(url, check.Equals, "https://example.com:5443/rick/data")
 }

@@ -52,7 +53,7 @@ func (s *MiddlewareSuite) TestHTTP(c *check.C) {
 	c.Assert(m.scheme, check.Equals, "http")
 	c.Assert(m.host, check.Equals, "example.com")

-	url, err := middleware.URLFor(nil, "morty/data", nil)
+	url, err := middleware.URLFor(context.TODO(), "morty/data", nil)
 	c.Assert(err, check.Equals, nil)
 	c.Assert(url, check.Equals, "http://example.com/morty/data")
 }
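Passing a nil Context is what staticcheck's SA1012 check flags; when a test has no meaningful context to thread through, context.TODO() is the documented placeholder. A sketch of the call shape (urlFor is a hypothetical simplification of middleware.URLFor):

    package main

    import (
    	"context"
    	"fmt"
    )

    // urlFor stands in for middleware.URLFor (signature abbreviated).
    func urlFor(ctx context.Context, path string) (string, error) {
    	if ctx == nil {
    		return "", fmt.Errorf("nil context")
    	}
    	return "https://example.com:5443" + path, nil
    }

    func main() {
    	// Before: urlFor(nil, ...); staticcheck flags nil contexts.
    	url, err := urlFor(context.TODO(), "/rick/data")
    	fmt.Println(url, err)
    }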
@@ -197,14 +197,14 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
 		regionEndpoint = ""
 	}

-	regionName, ok := parameters["region"]
+	regionName := parameters["region"]
 	if regionName == nil || fmt.Sprint(regionName) == "" {
 		return nil, fmt.Errorf("No region parameter provided")
 	}
 	region := fmt.Sprint(regionName)
 	// Don't check the region value if a custom endpoint is provided.
 	if regionEndpoint == "" {
-		if _, ok = validRegions[region]; !ok {
+		if _, ok := validRegions[region]; !ok {
 			return nil, fmt.Errorf("Invalid region provided: %v", region)
 		}
 	}

@@ -398,6 +398,10 @@ func New(params DriverParameters) (*Driver, error) {
 	}

 	awsConfig := aws.NewConfig()
+	sess, err := session.NewSession()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new session: %v", err)
+	}
 	creds := credentials.NewChainCredentials([]credentials.Provider{
 		&credentials.StaticProvider{
 			Value: credentials.Value{

@@ -408,7 +412,7 @@ func New(params DriverParameters) (*Driver, error) {
 		},
 		&credentials.EnvProvider{},
 		&credentials.SharedCredentialsProvider{},
-		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
+		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
 	})

 	if params.RegionEndpoint != "" {

@@ -426,7 +430,11 @@ func New(params DriverParameters) (*Driver, error) {
 		})
 	}

-	s3obj := s3.New(session.New(awsConfig))
+	sess, err = session.NewSession(awsConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new session with aws config: %v", err)
+	}
+	s3obj := s3.New(sess)

 	// enable S3 compatible signature v2 signing instead
 	if !params.V4Auth {
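The session changes above move off session.New, which aws-sdk-go deprecated because it silently discards errors from loading shared config and credentials; session.NewSession returns those errors so the driver can fail fast. A sketch of the pattern (error wording assumed):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func newSession(cfg *aws.Config) (*session.Session, error) {
    	// Before: sess := session.New(cfg), which swallows any error
    	// from loading shared config or credentials.
    	sess, err := session.NewSession(cfg)
    	if err != nil {
    		return nil, fmt.Errorf("failed to create new session: %v", err)
    	}
    	return sess, nil
    }

    func main() {
    	sess, err := newSession(aws.NewConfig())
    	fmt.Println(sess != nil, err)
    }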
@@ -1150,10 +1158,10 @@ func (w *writer) Write(p []byte) (int, error) {
 		Bucket: aws.String(w.driver.Bucket),
 		Key:    aws.String(w.key),
 	})
-	defer resp.Body.Close()
 	if err != nil {
 		return 0, err
 	}
+	defer resp.Body.Close()
 	w.parts = nil
 	w.readyPart, err = ioutil.ReadAll(resp.Body)
 	if err != nil {
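Reordering the defer above fixes a genuine bug, not just a lint warning: when the request fails, resp may be nil, so deferring resp.Body.Close() before the error check can panic. The defer must come after err is known to be nil. A minimal sketch with net/http:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func fetch(url string) error {
    	resp, err := http.Get(url)
    	// Wrong: deferring resp.Body.Close() here would dereference a
    	// nil resp whenever the request fails.
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close() // safe: resp is non-nil once err == nil
    	fmt.Println("status:", resp.Status)
    	return nil
    }

    func main() {
    	_ = fetch("https://example.com")
    }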
@@ -139,14 +139,14 @@ func TestEmptyRootList(t *testing.T) {
 	}
 	defer rootedDriver.Delete(ctx, filename)

-	keys, err := emptyRootDriver.List(ctx, "/")
+	keys, _ := emptyRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		if !storagedriver.PathRegexp.MatchString(path) {
 			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
 		}
 	}

-	keys, err = slashRootDriver.List(ctx, "/")
+	keys, _ = slashRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		if !storagedriver.PathRegexp.MatchString(path) {
 			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
@@ -209,9 +209,9 @@ func (v2 *signer) Sign() error {
 	v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil))

 	if expires {
-		params["Signature"] = []string{string(v2.signature)}
+		params["Signature"] = []string{v2.signature}
 	} else {
-		headers["Authorization"] = []string{"AWS " + accessKey + ":" + string(v2.signature)}
+		headers["Authorization"] = []string{"AWS " + accessKey + ":" + v2.signature}
 	}

 	log.WithFields(log.Fields{
@@ -125,14 +125,14 @@ func TestEmptyRootList(t *testing.T) {
 	}
 	defer rootedDriver.Delete(ctx, filename)

-	keys, err := emptyRootDriver.List(ctx, "/")
+	keys, _ := emptyRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		if !storagedriver.PathRegexp.MatchString(path) {
 			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
 		}
 	}

-	keys, err = slashRootDriver.List(ctx, "/")
+	keys, _ = slashRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		if !storagedriver.PathRegexp.MatchString(path) {
 			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
@@ -148,14 +148,14 @@ func TestEmptyRootList(t *testing.T) {
 		t.Fatalf("unexpected error creating content: %v", err)
 	}

-	keys, err := emptyRootDriver.List(ctx, "/")
+	keys, _ := emptyRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		if !storagedriver.PathRegexp.MatchString(path) {
 			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
 		}
 	}

-	keys, err = slashRootDriver.List(ctx, "/")
+	keys, _ = slashRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		if !storagedriver.PathRegexp.MatchString(path) {
 			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)

@@ -234,11 +234,11 @@ func TestFilenameChunking(t *testing.T) {
 	}

 	// Test 0 and < 0 sizes
-	actual, err = chunkFilenames(nil, 0)
+	_, err = chunkFilenames(nil, 0)
 	if err == nil {
 		t.Fatal("expected error for size = 0")
 	}
-	actual, err = chunkFilenames(nil, -1)
+	_, err = chunkFilenames(nil, -1)
 	if err == nil {
 		t.Fatal("expected error for size = -1")
 	}
@@ -136,7 +136,7 @@ func (suite *DriverSuite) deletePath(c *check.C, path string) {
 		err = nil
 	}
 	c.Assert(err, check.IsNil)
-	paths, err := suite.StorageDriver.List(suite.ctx, path)
+	paths, _ := suite.StorageDriver.List(suite.ctx, path)
 	if len(paths) == 0 {
 		break
 	}

@@ -651,7 +651,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) {
 	}
 	c.Assert(err, check.IsNil)

-	response, err = http.Head(url)
+	response, _ = http.Head(url)
 	c.Assert(response.StatusCode, check.Equals, 200)
 	c.Assert(response.ContentLength, check.Equals, int64(32))
 }

@@ -1116,7 +1116,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
 	c.Assert(err, check.IsNil)

 	tf.Sync()
-	tf.Seek(0, os.SEEK_SET)
+	tf.Seek(0, io.SeekStart)

 	writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
 	c.Assert(err, check.IsNil)
@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"os"

 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 )

@@ -81,12 +80,12 @@ func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
 	newOffset := fr.offset

 	switch whence {
-	case os.SEEK_CUR:
-		newOffset += int64(offset)
-	case os.SEEK_END:
-		newOffset = fr.size + int64(offset)
-	case os.SEEK_SET:
-		newOffset = int64(offset)
+	case io.SeekCurrent:
+		newOffset += offset
+	case io.SeekEnd:
+		newOffset = fr.size + offset
+	case io.SeekStart:
+		newOffset = offset
 	}

 	if newOffset < 0 {
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"io"
 	mrand "math/rand"
-	"os"
 	"testing"

 	"github.com/docker/distribution/context"

@@ -72,7 +71,7 @@ func TestFileReaderSeek(t *testing.T) {
 	for _, repitition := range mrand.Perm(repititions - 1) {
 		targetOffset := int64(len(pattern) * repitition)
 		// Seek to a multiple of pattern size and read pattern size bytes
-		offset, err := fr.Seek(targetOffset, os.SEEK_SET)
+		offset, err := fr.Seek(targetOffset, io.SeekStart)
 		if err != nil {
 			t.Fatalf("unexpected error seeking: %v", err)
 		}

@@ -97,7 +96,7 @@ func TestFileReaderSeek(t *testing.T) {
 	}

 	// Check offset
-	current, err := fr.Seek(0, os.SEEK_CUR)
+	current, err := fr.Seek(0, io.SeekCurrent)
 	if err != nil {
 		t.Fatalf("error checking current offset: %v", err)
 	}

@@ -107,7 +106,7 @@ func TestFileReaderSeek(t *testing.T) {
 		}
 	}

-	start, err := fr.Seek(0, os.SEEK_SET)
+	start, err := fr.Seek(0, io.SeekStart)
 	if err != nil {
 		t.Fatalf("error seeking to start: %v", err)
 	}

@@ -116,7 +115,7 @@ func TestFileReaderSeek(t *testing.T) {
 		t.Fatalf("expected to seek to start: %v != 0", start)
 	}

-	end, err := fr.Seek(0, os.SEEK_END)
+	end, err := fr.Seek(0, io.SeekEnd)
 	if err != nil {
 		t.Fatalf("error checking current offset: %v", err)
 	}

@@ -128,13 +127,13 @@ func TestFileReaderSeek(t *testing.T) {
 	// 4. Seek before start, ensure error.

 	// seek before start
-	before, err := fr.Seek(-1, os.SEEK_SET)
+	before, err := fr.Seek(-1, io.SeekStart)
 	if err == nil {
 		t.Fatalf("error expected, returned offset=%v", before)
 	}

 	// 5. Seek after end,
-	after, err := fr.Seek(1, os.SEEK_END)
+	after, err := fr.Seek(1, io.SeekEnd)
 	if err != nil {
 		t.Fatalf("unexpected error expected, returned offset=%v", after)
 	}
@@ -164,7 +164,7 @@ func TestNoDeletionNoEffect(t *testing.T) {

 	registry := createRegistry(t, inmemoryDriver)
 	repo := makeRepository(t, registry, "palailogos")
-	manifestService, err := repo.Manifests(ctx)
+	manifestService, _ := repo.Manifests(ctx)

 	image1 := uploadRandomSchema1Image(t, repo)
 	image2 := uploadRandomSchema1Image(t, repo)

@@ -206,7 +206,7 @@ func TestDeleteManifestIfTagNotFound(t *testing.T) {

 	registry := createRegistry(t, inmemoryDriver)
 	repo := makeRepository(t, registry, "deletemanifests")
-	manifestService, err := repo.Manifests(ctx)
+	manifestService, _ := repo.Manifests(ctx)

 	// Create random layers
 	randomLayers1, err := testutil.CreateRandomLayers(3)

@@ -252,7 +252,7 @@ func TestDeleteManifestIfTagNotFound(t *testing.T) {
 	}

 	manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator)
-	err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
+	manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
 		repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst})
 		return nil
 	})

@@ -336,7 +336,7 @@ func TestDeletionHasEffect(t *testing.T) {

 	registry := createRegistry(t, inmemoryDriver)
 	repo := makeRepository(t, registry, "komnenos")
-	manifests, err := repo.Manifests(ctx)
+	manifests, _ := repo.Manifests(ctx)

 	image1 := uploadRandomSchema1Image(t, repo)
 	image2 := uploadRandomSchema1Image(t, repo)

@@ -346,7 +346,7 @@ func TestDeletionHasEffect(t *testing.T) {
 	manifests.Delete(ctx, image3.manifestDigest)

 	// Run GC
-	err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{
+	err := MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{
 		DryRun:         false,
 		RemoveUntagged: false,
 	})
@@ -22,7 +22,7 @@ func newUploadData() uploadData {
 	return uploadData{
 		containingDir: "",
 		// default to far in future to protect against missing startedat
-		startedAt: time.Now().Add(time.Duration(10000 * time.Hour)),
+		startedAt: time.Now().Add(10000 * time.Hour),
 	}
 }
@@ -179,7 +179,7 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([
 		tag: tag,
 	}

-	tagLinkPath, err := pathFor(tagLinkPathSpec)
+	tagLinkPath, _ := pathFor(tagLinkPathSpec)
 	tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
 	if err != nil {
 		switch err.(type) {
@@ -88,8 +88,7 @@ func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) {
 			return nil, fmt.Errorf("unexpected error generating test layer file: %v", err)
 		}

-		dgst := digest.Digest(ds)
-		digestMap[dgst] = rs
+		digestMap[ds] = rs
 	}
 	return digestMap, nil
 }