feat(linter): enable errcheck linter in golangci-lint (#4158)

This commit is contained in:
João Pereira 2023-11-21 11:57:34 +00:00 committed by GitHub
commit 17872ebbc0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
36 changed files with 243 additions and 66 deletions

View file

@ -11,7 +11,6 @@ linters:
- misspell - misspell
- bodyclose - bodyclose
- prealloc - prealloc
disable:
- errcheck - errcheck
linters-settings: linters-settings:

View file

@ -18,5 +18,9 @@ import (
) )
func main() { func main() {
// NOTE(milosgajdos): if the only two commands registered
// with registry.RootCmd fail they will halt the program
// execution and exit the program with non-zero exit code.
// nolint:errcheck
registry.RootCmd.Execute() registry.RootCmd.Execute()
} }

View file

@ -2,7 +2,7 @@
ARG GO_VERSION=1.20.10 ARG GO_VERSION=1.20.10
ARG ALPINE_VERSION=3.18 ARG ALPINE_VERSION=3.18
ARG GOLANGCI_LINT_VERSION=v1.54.2 ARG GOLANGCI_LINT_VERSION=v1.55.2
ARG BUILDTAGS="include_gcs" ARG BUILDTAGS="include_gcs"
FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint

View file

@ -265,6 +265,9 @@ func Handler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
checks := CheckStatus() checks := CheckStatus()
if len(checks) != 0 { if len(checks) != 0 {
// NOTE(milosgajdos): disable errcheck as the error is
// accessible via /debug/health
// nolint:errcheck
errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. errcode.ServeJSON(w, errcode.ErrorCodeUnavailable.
WithDetail("health check failed: please see /debug/health")) WithDetail("health check failed: please see /debug/health"))
return return

View file

@ -20,6 +20,7 @@ import (
"github.com/distribution/distribution/v3/internal/dcontext" "github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/auth" "github.com/distribution/distribution/v3/registry/auth"
"github.com/sirupsen/logrus"
) )
type accessController struct { type accessController struct {
@ -151,5 +152,7 @@ func createHtpasswdFile(path string) error {
} }
func init() { func init() {
auth.Register("htpasswd", auth.InitFunc(newAccessController)) if err := auth.Register("htpasswd", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register htpasswd auth: %v", err)
}
} }

View file

@ -33,7 +33,9 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err
credentials, ok := htpasswd.entries[username] credentials, ok := htpasswd.entries[username]
if !ok { if !ok {
// timing attack paranoia // timing attack paranoia
bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) if err := bcrypt.CompareHashAndPassword([]byte{}, []byte(password)); err != nil {
return auth.ErrAuthenticationFailure
}
return auth.ErrAuthenticationFailure return auth.ErrAuthenticationFailure
} }

View file

@ -13,6 +13,7 @@ import (
"strings" "strings"
"github.com/distribution/distribution/v3/registry/auth" "github.com/distribution/distribution/v3/registry/auth"
"github.com/sirupsen/logrus"
) )
// accessController provides a simple implementation of auth.AccessController // accessController provides a simple implementation of auth.AccessController
@ -87,5 +88,7 @@ func (ch challenge) Error() string {
// init registers the silly auth backend. // init registers the silly auth backend.
func init() { func init() {
auth.Register("silly", auth.InitFunc(newAccessController)) if err := auth.Register("silly", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register silly auth: %v", err)
}
} }

View file

@ -14,6 +14,7 @@ import (
"github.com/distribution/distribution/v3/registry/auth" "github.com/distribution/distribution/v3/registry/auth"
"github.com/go-jose/go-jose/v3" "github.com/go-jose/go-jose/v3"
"github.com/sirupsen/logrus"
) )
// accessSet maps a typed, named resource to // accessSet maps a typed, named resource to
@ -339,5 +340,7 @@ func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Ac
// init handles registering the token auth backend. // init handles registering the token auth backend.
func init() { func init() {
auth.Register("token", auth.InitFunc(newAccessController)) if err := auth.Register("token", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register token auth: %v", err)
}
} }

View file

@ -22,7 +22,10 @@ func FuzzToken1(f *testing.F) {
if err != nil { if err != nil {
return return
} }
token.Verify(verifyOps) _, err = token.Verify(verifyOps)
if err != nil {
return
}
_, _ = token.VerifySigningKey(verifyOps) _, _ = token.VerifySigningKey(verifyOps)
}) })
} }
@ -40,7 +43,10 @@ func FuzzToken2(f *testing.F) {
if err != nil { if err != nil {
return return
} }
token.Verify(verifyOps) _, err = token.Verify(verifyOps)
if err != nil {
return
}
_, _ = token.VerifySigningKey(verifyOps) _, _ = token.VerifySigningKey(verifyOps)
}) })
} }

View file

@ -339,6 +339,7 @@ func TestCatalogAPI(t *testing.T) {
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest) checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid) checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid)
// ----------------------------------- // -----------------------------------
@ -360,6 +361,7 @@ func TestCatalogAPI(t *testing.T) {
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest) checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid) checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid)
// ----------------------------------- // -----------------------------------
@ -409,6 +411,7 @@ func TestCatalogAPI(t *testing.T) {
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest) checkResponse(t, "issuing catalog api check", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid) checkBodyHasErrorCodes(t, "invalid number of results requested", resp, errcode.ErrorCodePaginationNumberInvalid)
// ----------------------------------- // -----------------------------------
@ -584,6 +587,7 @@ func TestTagsAPI(t *testing.T) {
} }
if test.expectedBodyErr != nil { if test.expectedBodyErr != nil {
// nolint:errcheck
checkBodyHasErrorCodes(t, "invalid number of results requested", resp, *test.expectedBodyErr) checkBodyHasErrorCodes(t, "invalid number of results requested", resp, *test.expectedBodyErr)
} else { } else {
var body tagsAPIResponse var body tagsAPIResponse
@ -900,6 +904,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "bad layer push", resp, http.StatusBadRequest) checkResponse(t, "bad layer push", resp, http.StatusBadRequest)
// nolint:errcheck
checkBodyHasErrorCodes(t, "bad layer push", resp, errcode.ErrorCodeDigestInvalid) checkBodyHasErrorCodes(t, "bad layer push", resp, errcode.ErrorCodeDigestInvalid)
// ----------------------------------------- // -----------------------------------------
@ -927,15 +932,22 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
// ------------------------------------------ // ------------------------------------------
// Now, actually do successful upload. // Now, actually do successful upload.
layerLength, _ := layerFile.Seek(0, io.SeekEnd) layerLength, err := layerFile.Seek(0, io.SeekEnd)
layerFile.Seek(0, io.SeekStart) if err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName) uploadURLBase, _ = startPushLayer(t, env, imageName)
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
// ------------------------------------------ // ------------------------------------------
// Now, push just a chunk // Now, push just a chunk
layerFile.Seek(0, 0) if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
canonicalDigester := digest.Canonical.Digester() canonicalDigester := digest.Canonical.Digester()
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
@ -943,7 +955,9 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
} }
canonicalDigest := canonicalDigester.Digest() canonicalDigest := canonicalDigester.Digest()
layerFile.Seek(0, 0) if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName) uploadURLBase, _ = startPushLayer(t, env, imageName)
uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength)
@ -961,7 +975,10 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
// ----------------------------------------- // -----------------------------------------
// Do layer push with invalid content range // Do layer push with invalid content range
layerFile.Seek(0, io.SeekStart) if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName) uploadURLBase, _ = startPushLayer(t, env, imageName)
sizeInvalid := chunkOptions{ sizeInvalid := chunkOptions{
contentRange: "0-20", contentRange: "0-20",
@ -973,7 +990,9 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "putting size invalid chunk", resp, http.StatusBadRequest) checkResponse(t, "putting size invalid chunk", resp, http.StatusBadRequest)
layerFile.Seek(0, io.SeekStart) if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ = startPushLayer(t, env, imageName) uploadURLBase, _ = startPushLayer(t, env, imageName)
outOfOrder := chunkOptions{ outOfOrder := chunkOptions{
contentRange: "3-22", contentRange: "3-22",
@ -1013,7 +1032,9 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
// Verify the body // Verify the body
verifier := layerDigest.Verifier() verifier := layerDigest.Verifier()
io.Copy(verifier, resp.Body) if _, err := io.Copy(verifier, resp.Body); err != nil {
t.Fatalf("unexpected error reading response body: %v", err)
}
if !verifier.Verified() { if !verifier.Verified() {
t.Fatalf("response body did not pass verification") t.Fatalf("response body did not pass verification")
@ -1134,12 +1155,16 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
// ---------------- // ----------------
// Reupload previously deleted blob // Reupload previously deleted blob
layerFile.Seek(0, io.SeekStart) if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
uploadURLBase, _ := startPushLayer(t, env, imageName) uploadURLBase, _ := startPushLayer(t, env, imageName)
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
layerFile.Seek(0, io.SeekStart) if _, err := layerFile.Seek(0, io.SeekStart); err != nil {
t.Fatalf("unexpected error seeking layer: %v", err)
}
canonicalDigester := digest.Canonical.Digester() canonicalDigester := digest.Canonical.Digester()
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
t.Fatalf("error copying to digest: %v", err) t.Fatalf("error copying to digest: %v", err)
@ -1342,6 +1367,7 @@ func TestManifestAPI_DeleteTag_Unknown(t *testing.T) {
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, msg, resp, http.StatusNotFound) checkResponse(t, msg, resp, http.StatusNotFound)
// nolint:errcheck
checkBodyHasErrorCodes(t, msg, resp, errcode.ErrorCodeManifestUnknown) checkBodyHasErrorCodes(t, msg, resp, errcode.ErrorCodeManifestUnknown)
} }
@ -1504,6 +1530,7 @@ func testManifestWithStorageError(t *testing.T, env *testEnv, imageName referenc
} }
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode) checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode)
// nolint:errcheck
checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode) checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode)
} }
@ -1528,6 +1555,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
defer resp.Body.Close() defer resp.Body.Close()
checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)
// nolint:errcheck
checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, errcode.ErrorCodeManifestUnknown) checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, errcode.ErrorCodeManifestUnknown)
tagsURL, err := env.builder.BuildTagsURL(imageName) tagsURL, err := env.builder.BuildTagsURL(imageName)
@ -1543,6 +1571,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name
// Check that we get an unknown repository error when asking for tags // Check that we get an unknown repository error when asking for tags
checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound)
// nolint:errcheck
checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, errcode.ErrorCodeNameUnknown) checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, errcode.ErrorCodeNameUnknown)
// -------------------------------- // --------------------------------

View file

@ -212,7 +212,9 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request)
w.Header().Set("Content-Length", fmt.Sprint(len(p))) w.Header().Set("Content-Length", fmt.Sprint(len(p)))
w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.Header().Set("Docker-Content-Digest", imh.Digest.String())
w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
w.Write(p) if _, err := w.Write(p); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
} }
func etagMatch(r *http.Request, etag string) bool { func etagMatch(r *http.Request, etag string) bool {

View file

@ -24,8 +24,14 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
if err != nil { if err != nil {
return return
} }
tc.SetKeepAlive(true) err = tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute) if err != nil {
return
}
err = tc.SetKeepAlivePeriod(3 * time.Minute)
if err != nil {
return
}
return tc, nil return tc, nil
} }

View file

@ -139,7 +139,10 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter,
} }
if pbs.scheduler != nil && pbs.ttl != nil { if pbs.scheduler != nil && pbs.ttl != nil {
pbs.scheduler.AddBlob(blobRef, *pbs.ttl) if err := pbs.scheduler.AddBlob(blobRef, *pbs.ttl); err != nil {
dcontext.GetLogger(ctx).Errorf("Error adding blob: %s", err)
return err
}
} }
return nil return nil

View file

@ -77,7 +77,10 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
} }
if pms.scheduler != nil && pms.ttl != nil { if pms.scheduler != nil && pms.ttl != nil {
pms.scheduler.AddManifest(repoBlob, *pms.ttl) if err := pms.scheduler.AddManifest(repoBlob, *pms.ttl); err != nil {
dcontext.GetLogger(ctx).Errorf("Error adding manifest: %s", err)
return nil, err
}
} }
// Ensure the manifest blob is cleaned up // Ensure the manifest blob is cleaned up

View file

@ -102,6 +102,7 @@ var ServeCmd = &cobra.Command{
config, err := resolveConfiguration(args) config, err := resolveConfiguration(args)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
// nolint:errcheck
cmd.Usage() cmd.Usage()
os.Exit(1) os.Exit(1)
} }

View file

@ -219,7 +219,9 @@ func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*regis
return nil, fmt.Errorf("failed to create certificate: %v", err) return nil, fmt.Errorf("failed to create certificate: %v", err)
} }
if _, err := os.Stat(os.TempDir()); os.IsNotExist(err) { if _, err := os.Stat(os.TempDir()); os.IsNotExist(err) {
os.Mkdir(os.TempDir(), 1777) if err := os.Mkdir(os.TempDir(), 1777); err != nil {
return nil, fmt.Errorf("failed to create temp dir: %v", err)
}
} }
certPath := path.Join(os.TempDir(), name+".pem") certPath := path.Join(os.TempDir(), name+".pem")

View file

@ -31,6 +31,7 @@ var RootCmd = &cobra.Command{
version.PrintVersion() version.PrintVersion()
return return
} }
// nolint:errcheck
cmd.Usage() cmd.Usage()
}, },
} }
@ -49,6 +50,7 @@ var GCCmd = &cobra.Command{
config, err := resolveConfiguration(args) config, err := resolveConfiguration(args)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
// nolint:errcheck
cmd.Usage() cmd.Usage()
os.Exit(1) os.Exit(1)
} }

View file

@ -39,7 +39,9 @@ func TestWriteSeek(t *testing.T) {
t.Fatalf("unexpected error starting layer upload: %s", err) t.Fatalf("unexpected error starting layer upload: %s", err)
} }
contents := []byte{1, 2, 3} contents := []byte{1, 2, 3}
blobUpload.Write(contents) if _, err := blobUpload.Write(contents); err != nil {
t.Fatalf("unexpected error writing contents: %v", err)
}
blobUpload.Close() blobUpload.Close()
offset := blobUpload.Size() offset := blobUpload.Size()
if offset != int64(len(contents)) { if offset != int64(len(contents)) {
@ -596,6 +598,7 @@ func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distributio
if err != nil { if err != nil {
return distribution.Descriptor{}, err return distribution.Descriptor{}, err
} }
// nolint:errcheck
defer wr.Cancel(ctx) defer wr.Cancel(ctx)
if nn, err := io.Copy(wr, rd); err != nil { if nn, err := io.Copy(wr, rd); err != nil {

View file

@ -319,7 +319,9 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
copyStatus := *resp.CopyStatus copyStatus := *resp.CopyStatus
if d.copyStatusPollMaxRetry == -1 && copyStatus == blob.CopyStatusTypePending { if d.copyStatusPollMaxRetry == -1 && copyStatus == blob.CopyStatusTypePending {
destBlobRef.AbortCopyFromURL(ctx, *resp.CopyID, nil) if _, err := destBlobRef.AbortCopyFromURL(ctx, *resp.CopyID, nil); err != nil {
return err
}
return nil return nil
} }
@ -331,7 +333,9 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
} }
if retryCount >= d.copyStatusPollMaxRetry { if retryCount >= d.copyStatusPollMaxRetry {
destBlobRef.AbortCopyFromURL(ctx, *props.CopyID, nil) if _, err := destBlobRef.AbortCopyFromURL(ctx, *props.CopyID, nil); err != nil {
return err
}
return fmt.Errorf("max retries for copy polling reached, aborting copy") return fmt.Errorf("max retries for copy polling reached, aborting copy")
} }

View file

@ -107,7 +107,9 @@ func TestCommitAfterMove(t *testing.T) {
destPath := "/dest/file" destPath := "/dest/file"
ctx := context.Background() ctx := context.Background()
// nolint:errcheck
defer driver.Delete(ctx, sourcePath) defer driver.Delete(ctx, sourcePath)
// nolint:errcheck
defer driver.Delete(ctx, destPath) defer driver.Delete(ctx, destPath)
writer, err := driver.Writer(ctx, sourcePath, false) writer, err := driver.Writer(ctx, sourcePath, false)

View file

@ -4,6 +4,7 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -140,7 +141,9 @@ func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte
defer writer.Close() defer writer.Close()
_, err = io.Copy(writer, bytes.NewReader(contents)) _, err = io.Copy(writer, bytes.NewReader(contents))
if err != nil { if err != nil {
writer.Cancel(ctx) if cErr := writer.Cancel(ctx); cErr != nil {
return errors.Join(err, cErr)
}
return err return err
} }
return writer.Commit(ctx) return writer.Commit(ctx)

View file

@ -119,6 +119,7 @@ func TestCommitEmpty(t *testing.T) {
ctx := dcontext.Background() ctx := dcontext.Background()
writer, err := driver.Writer(ctx, filename, false) writer, err := driver.Writer(ctx, filename, false)
// nolint:errcheck
defer driver.Delete(ctx, filename) defer driver.Delete(ctx, filename)
if err != nil { if err != nil {
t.Fatalf("driver.Writer: unexpected error: %v", err) t.Fatalf("driver.Writer: unexpected error: %v", err)
@ -162,6 +163,7 @@ func TestCommit(t *testing.T) {
contents := make([]byte, defaultChunkSize) contents := make([]byte, defaultChunkSize)
writer, err := driver.Writer(ctx, filename, false) writer, err := driver.Writer(ctx, filename, false)
// nolint:errcheck
defer driver.Delete(ctx, filename) defer driver.Delete(ctx, filename)
if err != nil { if err != nil {
t.Fatalf("driver.Writer: unexpected error: %v", err) t.Fatalf("driver.Writer: unexpected error: %v", err)

View file

@ -97,7 +97,9 @@ func (d *driver) PutContent(ctx context.Context, p string, contents []byte) erro
} }
f.truncate() f.truncate()
f.WriteAt(contents, 0) if _, err := f.WriteAt(contents, 0); err != nil {
return err
}
return nil return nil
} }
@ -301,7 +303,10 @@ func (w *writer) Close() error {
return fmt.Errorf("already closed") return fmt.Errorf("already closed")
} }
w.closed = true w.closed = true
w.flush()
if err := w.flush(); err != nil {
return err
}
return nil return nil
} }
@ -329,16 +334,23 @@ func (w *writer) Commit(ctx context.Context) error {
return fmt.Errorf("already cancelled") return fmt.Errorf("already cancelled")
} }
w.committed = true w.committed = true
w.flush()
if err := w.flush(); err != nil {
return err
}
return nil return nil
} }
func (w *writer) flush() { func (w *writer) flush() error {
w.d.mutex.Lock() w.d.mutex.Lock()
defer w.d.mutex.Unlock() defer w.d.mutex.Unlock()
w.f.WriteAt(w.buffer, int64(len(w.f.data))) if _, err := w.f.WriteAt(w.buffer, int64(len(w.f.data))); err != nil {
return err
}
w.buffer = []byte{} w.buffer = []byte{}
w.buffSize = 0 w.buffSize = 0
return nil
} }

View file

@ -17,6 +17,7 @@ import (
"github.com/distribution/distribution/v3/internal/dcontext" "github.com/distribution/distribution/v3/internal/dcontext"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver" storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware" storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware"
"github.com/sirupsen/logrus"
) )
// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that // cloudFrontStorageMiddleware provides a simple implementation of layerHandler that
@ -226,5 +227,7 @@ func (lh *cloudFrontStorageMiddleware) RedirectURL(r *http.Request, path string)
// init registers the cloudfront layerHandler backend. // init registers the cloudfront layerHandler backend.
func init() { func init() {
storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware) if err := storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware); err != nil {
logrus.Errorf("failed to register cloudfront middleware: %v", err)
}
} }

View file

@ -45,7 +45,9 @@ pZeMRablbPQdp8/1NyIwimq1VlG0ohQ4P6qhW7E09ZMC
if err != nil { if err != nil {
t.Fatal("File cannot be created") t.Fatal("File cannot be created")
} }
file.WriteString(privk) if _, err := file.WriteString(privk); err != nil {
t.Fatal(err)
}
defer os.Remove(file.Name()) defer os.Remove(file.Name())
options["privatekey"] = file.Name() options["privatekey"] = file.Name()
options["keypairid"] = "test" options["keypairid"] = "test"

View file

@ -9,6 +9,7 @@ import (
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"reflect" // used as a replacement for testify "reflect" // used as a replacement for testify
"sync"
"testing" "testing"
"time" "time"
) )
@ -31,7 +32,10 @@ func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500) w.WriteHeader(500)
return return
} }
w.Write(bytes) if _, err := w.Write(bytes); err != nil {
w.WriteHeader(500)
return
}
} }
func newTestHandler(data awsIPResponse) *httptest.Server { func newTestHandler(data awsIPResponse) *httptest.Server {
@ -77,7 +81,8 @@ func TestMatchIPV6(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("ff00::"))) assertEqual(t, true, ips.contains(net.ParseIP("ff00::")))
assertEqual(t, 1, len(ips.ipv6)) assertEqual(t, 1, len(ips.ipv6))
assertEqual(t, 0, len(ips.ipv4)) assertEqual(t, 0, len(ips.ipv4))
@ -94,7 +99,8 @@ func TestMatchIPV4(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -114,7 +120,8 @@ func TestMatchIPV4_2(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -134,7 +141,8 @@ func TestMatchIPV4WithRegionMatched(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-east-1"}) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-east-1"})
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -154,7 +162,8 @@ func TestMatchIPV4WithRegionMatch_2(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-west-2", "us-east-1"}) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-west-2", "us-east-1"})
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -174,7 +183,8 @@ func TestMatchIPV4WithRegionNotMatched(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-west-2"}) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, []string{"us-west-2"})
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.0"))) assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.0")))
assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.1"))) assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.1")))
assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0")))
@ -193,7 +203,8 @@ func TestInvalidData(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil) ips, _ := newAWSIPs(ctx, serverIPRanges(server), time.Hour, nil)
ips.tryUpdate(ctx) err := ips.tryUpdate(ctx)
assertEqual(t, err, nil)
assertEqual(t, 1, len(ips.ipv4)) assertEqual(t, 1, len(ips.ipv4))
} }
@ -227,7 +238,12 @@ func TestParsing(t *testing.T) {
"region": "anotherregion", "region": "anotherregion",
"service": "ec2"}] "service": "ec2"}]
}` }`
rawMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(data)) }) rawMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(data)); err != nil {
w.WriteHeader(500)
return
}
})
t.Parallel() t.Parallel()
server := httptest.NewServer(rawMockHandler) server := httptest.NewServer(rawMockHandler)
defer server.Close() defer server.Close()
@ -251,15 +267,25 @@ func TestParsing(t *testing.T) {
func TestUpdateCalledRegularly(t *testing.T) { func TestUpdateCalledRegularly(t *testing.T) {
t.Parallel() t.Parallel()
mu := &sync.RWMutex{}
updateCount := 0 updateCount := 0
server := httptest.NewServer(http.HandlerFunc( server := httptest.NewServer(http.HandlerFunc(
func(rw http.ResponseWriter, req *http.Request) { func(rw http.ResponseWriter, req *http.Request) {
mu.Lock()
updateCount++ updateCount++
rw.Write([]byte("ok")) mu.Unlock()
if _, err := rw.Write([]byte("ok")); err != nil {
rw.WriteHeader(http.StatusInternalServerError)
}
})) }))
defer server.Close() defer server.Close()
newAWSIPs(context.Background(), fmt.Sprintf("%s/", server.URL), time.Second, nil) if _, err := newAWSIPs(context.Background(), fmt.Sprintf("%s/", server.URL), time.Second, nil); err != nil {
t.Fatalf("failed creating AWS IP filter: %v", err)
}
time.Sleep(time.Second*4 + time.Millisecond*500) time.Sleep(time.Second*4 + time.Millisecond*500)
mu.RLock()
defer mu.RUnlock()
if updateCount < 4 { if updateCount < 4 {
t.Errorf("Update should have been called at least 4 times, actual=%d", updateCount) t.Errorf("Update should have been called at least 4 times, actual=%d", updateCount)
} }
@ -364,8 +390,14 @@ func BenchmarkContainsRandom(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
ipv4[i] = make([]byte, 4) ipv4[i] = make([]byte, 4)
ipv6[i] = make([]byte, 16) ipv6[i] = make([]byte, 16)
rand.Read(ipv4[i]) _, err := rand.Read(ipv4[i])
rand.Read(ipv6[i]) if err != nil {
b.Fatal(err)
}
_, err = rand.Read(ipv6[i])
if err != nil {
b.Fatal(err)
}
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -381,8 +413,14 @@ func BenchmarkContainsProd(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
ipv4[i] = make([]byte, 4) ipv4[i] = make([]byte, 4)
ipv6[i] = make([]byte, 16) ipv6[i] = make([]byte, 16)
rand.Read(ipv4[i]) _, err := rand.Read(ipv4[i])
rand.Read(ipv6[i]) if err != nil {
b.Fatal(err)
}
_, err = rand.Read(ipv6[i])
if err != nil {
b.Fatal(err)
}
} }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {

View file

@ -9,6 +9,7 @@ import (
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver" storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware" storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware"
"github.com/sirupsen/logrus"
) )
type redirectStorageMiddleware struct { type redirectStorageMiddleware struct {
@ -52,5 +53,7 @@ func (r *redirectStorageMiddleware) RedirectURL(_ *http.Request, urlPath string)
} }
func init() { func init() {
storagemiddleware.Register("redirect", newRedirectStorageMiddleware) if err := storagemiddleware.Register("redirect", newRedirectStorageMiddleware); err != nil {
logrus.Errorf("failed to register redirect storage middleware: %v", err)
}
} }

View file

@ -186,6 +186,7 @@ func TestEmptyRootList(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("unexpected error creating content: %v", err) t.Fatalf("unexpected error creating content: %v", err)
} }
// nolint:errcheck
defer rootedDriver.Delete(ctx, filename) defer rootedDriver.Delete(ctx, filename)
keys, _ := emptyRootDriver.List(ctx, "/") keys, _ := emptyRootDriver.List(ctx, "/")
@ -230,6 +231,7 @@ func TestStorageClass(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("unexpected error creating content with storage class %v: %v", storageClass, err) t.Fatalf("unexpected error creating content with storage class %v: %v", storageClass, err)
} }
// nolint:errcheck
defer s3Driver.Delete(ctx, filename) defer s3Driver.Delete(ctx, filename)
driverUnwrapped := s3Driver.Base.StorageDriver.(*driver) driverUnwrapped := s3Driver.Base.StorageDriver.(*driver)
@ -751,17 +753,21 @@ func TestMoveWithMultipartCopy(t *testing.T) {
sourcePath := "/source" sourcePath := "/source"
destPath := "/dest" destPath := "/dest"
// nolint:errcheck
defer d.Delete(ctx, sourcePath) defer d.Delete(ctx, sourcePath)
// nolint:errcheck
defer d.Delete(ctx, destPath) defer d.Delete(ctx, destPath)
// An object larger than d's MultipartCopyThresholdSize will cause d.Move() to perform a multipart copy. // An object larger than d's MultipartCopyThresholdSize will cause d.Move() to perform a multipart copy.
multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize
contents := make([]byte, 2*multipartCopyThresholdSize) contents := make([]byte, 2*multipartCopyThresholdSize)
rand.Read(contents) if _, err := rand.Read(contents); err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
err = d.PutContent(ctx, sourcePath, contents) err = d.PutContent(ctx, sourcePath, contents)
if err != nil { if err != nil {
t.Fatalf("unexpected error creating content: %v", err) t.Fatalf("unexpected error writing content: %v", err)
} }
err = d.Move(ctx, sourcePath, destPath) err = d.Move(ctx, sourcePath, destPath)

View file

@ -105,6 +105,9 @@ func Sign(req *request.Request) {
Time: req.Time, Time: req.Time,
Credentials: req.Config.Credentials, Credentials: req.Config.Credentials,
} }
// TODO(milosgajdos): figure this out; if Sign returns error which we should check,
// we should modify the codepath related to svc.Handlers.Sign.PushBack etc.
// nolint:errcheck
v2.Sign() v2.Sign()
} }

View file

@ -302,7 +302,9 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
defer reader.Close() defer reader.Close()
writtenChecksum := sha256.New() writtenChecksum := sha256.New()
io.Copy(writtenChecksum, reader) if _, err := io.Copy(writtenChecksum, reader); err != nil {
c.Assert(err, check.IsNil)
}
c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
} }
@ -1108,6 +1110,7 @@ func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
parentDir := randomPath(8) parentDir := randomPath(8)
defer func() { defer func() {
c.StopTimer() c.StopTimer()
// nolint:errcheck
suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
}() }()
@ -1146,6 +1149,7 @@ func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) {
parentDir := randomPath(8) parentDir := randomPath(8)
defer func() { defer func() {
c.StopTimer() c.StopTimer()
// nolint:errcheck
suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
}() }()
@ -1182,6 +1186,7 @@ func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) {
parentDir := randomPath(8) parentDir := randomPath(8)
defer func() { defer func() {
c.StopTimer() c.StopTimer()
// nolint:errcheck
suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
}() }()
@ -1240,8 +1245,10 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
_, err = tf.Write(contents) _, err = tf.Write(contents)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
tf.Sync() err = tf.Sync()
tf.Seek(0, io.SeekStart) c.Assert(err, check.IsNil)
_, err = tf.Seek(0, io.SeekStart)
c.Assert(err, check.IsNil)
writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)

View file

@ -42,7 +42,9 @@ func TestSimpleRead(t *testing.T) {
} }
verifier := dgst.Verifier() verifier := dgst.Verifier()
io.Copy(verifier, fr) if _, err := io.Copy(verifier, fr); err != nil {
t.Fatalf("failed writing verification data: %v", err)
}
if !verifier.Verified() { if !verifier.Verified() {
t.Fatalf("unable to verify read data") t.Fatalf("unable to verify read data")

View file

@ -224,10 +224,12 @@ func TestDeleteManifestIfTagNotFound(t *testing.T) {
} }
manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator) manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator)
manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst}) return repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst})
return nil
}) })
if err != nil {
t.Fatalf("manifest enumeration failed: %v", err)
}
before1 := allBlobs(t, registry) before1 := allBlobs(t, registry)
before2 := allManifests(t, manifestService) before2 := allManifests(t, manifestService)
@ -314,8 +316,13 @@ func TestDeletionHasEffect(t *testing.T) {
image2 := uploadRandomSchema2Image(t, repo) image2 := uploadRandomSchema2Image(t, repo)
image3 := uploadRandomSchema2Image(t, repo) image3 := uploadRandomSchema2Image(t, repo)
manifests.Delete(ctx, image2.manifestDigest) if err := manifests.Delete(ctx, image2.manifestDigest); err != nil {
manifests.Delete(ctx, image3.manifestDigest) t.Fatalf("failed deleting manifest digest: %v", err)
}
if err := manifests.Delete(ctx, image3.manifestDigest); err != nil {
t.Fatalf("failed deleting manifest digest: %v", err)
}
// Run GC // Run GC
err := MarkAndSweep(dcontext.Background(), inmemoryDriver, registry, GCOpts{ err := MarkAndSweep(dcontext.Background(), inmemoryDriver, registry, GCOpts{

View file

@ -137,7 +137,9 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) {
if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil {
t.Fatalf("unexpected error finishing upload: %v", err) t.Fatalf("unexpected error finishing upload: %v", err)
} }
builder.AppendReference(distribution.Descriptor{Digest: dgst}) if err := builder.AppendReference(distribution.Descriptor{Digest: dgst, MediaType: schema2.MediaTypeLayer}); err != nil {
t.Fatalf("unexpected error appending references: %v", err)
}
} }
sm, err := builder.Build(ctx) sm, err := builder.Build(ctx)
@ -349,7 +351,9 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo
t.Fatalf("%s: unexpected error finishing upload: %v", testname, err) t.Fatalf("%s: unexpected error finishing upload: %v", testname, err)
} }
builder.AppendReference(distribution.Descriptor{Digest: dgst}) if err := builder.AppendReference(distribution.Descriptor{Digest: dgst, MediaType: v1.MediaTypeImageLayer}); err != nil {
t.Fatalf("%s unexpected error appending references: %v", testname, err)
}
} }
mfst, err := builder.Build(ctx) mfst, err := builder.Build(ctx)

View file

@ -35,7 +35,7 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa
t.Fatalf("Unable to resolve path") t.Fatalf("Unable to resolve path")
} }
if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
t.Fatalf("Unable to write startedAt file") t.Fatalf("Unable to write startedAt file")
} }
} }

View file

@ -143,5 +143,8 @@ func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(response.StatusCode) w.WriteHeader(response.StatusCode)
io.Copy(w, bytes.NewReader(response.Body)) if _, err := io.Copy(w, bytes.NewReader(response.Body)); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
} }

View file

@ -50,7 +50,9 @@ func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Di
} }
builder := schema2.NewManifestBuilder(d, configJSON) builder := schema2.NewManifestBuilder(d, configJSON)
for _, digest := range digests { for _, digest := range digests {
builder.AppendReference(distribution.Descriptor{Digest: digest}) if err := builder.AppendReference(distribution.Descriptor{Digest: digest}); err != nil {
return nil, fmt.Errorf("unexpected error building manifest: %v", err)
}
} }
mfst, err := builder.Build(ctx) mfst, err := builder.Build(ctx)