From c02f1a5507353fff1b430f9c1dc90fca0ba70df1 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 6 Jan 2015 10:37:27 -0800
Subject: [PATCH 001/501] Move registry package out of repo root

Since the repo is no longer just the registry, we are moving the registry
web application package out of the repo root into a sub-package. We may
break down the registry package further to separate webapp components and
bring the client package under it. This change accomplishes the task of
freeing up the repo root for a distribution-oriented package. A stub
doc.go file is left in place to declare intent.

Signed-off-by: Stephen J Day
---
 docs/api_test.go    | 541 ++++++++++++++++++++++++++++++++++++++++++++
 docs/app.go         | 263 +++++++++++++++++++++
 docs/app_test.go    | 194 ++++++++++++++++
 docs/context.go     |  32 +++
 docs/helpers.go     |  32 +++
 docs/images.go      | 114 ++++++++++
 docs/layer.go       |  62 +++++
 docs/layerupload.go | 245 ++++++++++++++++++++
 docs/tags.go        |  60 +++++
 docs/tokens.go      |  65 ++++++
 docs/tokens_test.go | 121 ++++++++++
 docs/util.go        |  27 +++
 12 files changed, 1756 insertions(+)
 create mode 100644 docs/api_test.go
 create mode 100644 docs/app.go
 create mode 100644 docs/app_test.go
 create mode 100644 docs/context.go
 create mode 100644 docs/helpers.go
 create mode 100644 docs/images.go
 create mode 100644 docs/layer.go
 create mode 100644 docs/layerupload.go
 create mode 100644 docs/tags.go
 create mode 100644 docs/tokens.go
 create mode 100644 docs/tokens_test.go
 create mode 100644 docs/util.go

diff --git a/docs/api_test.go b/docs/api_test.go
new file mode 100644
index 00000000..b0f3bb2b
--- /dev/null
+++ b/docs/api_test.go
@@ -0,0 +1,541 @@
+package registry
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"testing"
+
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/configuration"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	_ "github.com/docker/distribution/storagedriver/inmemory"
+	"github.com/docker/distribution/testutil"
+	"github.com/docker/libtrust"
+	"github.com/gorilla/handlers"
+)
+
+// TestCheckAPI hits the base endpoint (/v2/) and ensures we return the
+// specified 200 OK response.
+func TestCheckAPI(t *testing.T) {
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+		},
+	}
+
+	app := NewApp(config)
+	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+
+	if err != nil {
+		t.Fatalf("error creating url builder: %v", err)
+	}
+
+	baseURL, err := builder.BuildBaseURL()
+	if err != nil {
+		t.Fatalf("unexpected error building base url: %v", err)
+	}
+
+	resp, err := http.Get(baseURL)
+	if err != nil {
+		t.Fatalf("unexpected error issuing request: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "issuing api base check", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Type":   []string{"application/json; charset=utf-8"},
+		"Content-Length": []string{"2"},
+	})
+
+	p, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("unexpected error reading response body: %v", err)
+	}
+
+	if string(p) != "{}" {
+		t.Fatalf("unexpected response body: %v", string(p))
+	}
+}
+
+// TestLayerAPI conducts a full test of the layer api.
+func TestLayerAPI(t *testing.T) {
+	// TODO(stevvooe): This test code is complete junk but it should cover the
+	// complete flow. This must be broken down and checked against the
+	// specification *before* we submit the final to docker core.
+
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+		},
+	}
+
+	app := NewApp(config)
+	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+
+	if err != nil {
+		t.Fatalf("error creating url builder: %v", err)
+	}
+
+	imageName := "foo/bar"
+	// "build" our layer file
+	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
+	if err != nil {
+		t.Fatalf("error creating random layer file: %v", err)
+	}
+
+	layerDigest := digest.Digest(tarSumStr)
+
+	// -----------------------------------
+	// Test fetch for non-existent content
+	layerURL, err := builder.BuildBlobURL(imageName, layerDigest)
+	if err != nil {
+		t.Fatalf("error building url: %v", err)
+	}
+
+	resp, err := http.Get(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error fetching non-existent layer: %v", err)
+	}
+
+	checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound)
+
+	// ------------------------------------------
+	// Test head request for non-existent content
+	resp, err = http.Head(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error checking head on non-existent layer: %v", err)
+	}
+
+	checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound)
+
+	// ------------------------------------------
+	// Upload a layer
+	layerUploadURL, err := builder.BuildBlobUploadURL(imageName)
+	if err != nil {
+		t.Fatalf("error building upload url: %v", err)
+	}
+
+	resp, err = http.Post(layerUploadURL, "", nil)
+	if err != nil {
+		t.Fatalf("error starting layer upload: %v", err)
+	}
+
+	checkResponse(t, "starting layer upload", resp, http.StatusAccepted)
+	checkHeaders(t, resp, http.Header{
+		"Location":       []string{"*"},
+		"Content-Length": []string{"0"},
+	})
+
+	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
+	layerFile.Seek(0, os.SEEK_SET)
+
+	// TODO(sday): Cancel the layer upload here and restart.
+
+	uploadURLBase := startPushLayer(t, builder, imageName)
+	pushLayer(t, builder, imageName, layerDigest, uploadURLBase, layerFile)
+
+	// ------------------------
+	// Use a head request to see if the layer exists.
+	resp, err = http.Head(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error checking head on existing layer: %v", err)
+	}
+
+	checkResponse(t, "checking head on existing layer", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length": []string{fmt.Sprint(layerLength)},
+	})
+
+	// ----------------
+	// Fetch the layer!
+	resp, err = http.Get(layerURL)
+	if err != nil {
+		t.Fatalf("unexpected error fetching layer: %v", err)
+	}
+
+	checkResponse(t, "fetching layer", resp, http.StatusOK)
+	checkHeaders(t, resp, http.Header{
+		"Content-Length": []string{fmt.Sprint(layerLength)},
+	})
+
+	// Verify the body
+	verifier := digest.NewDigestVerifier(layerDigest)
+	io.Copy(verifier, resp.Body)
+
+	if !verifier.Verified() {
+		t.Fatalf("response body did not pass verification")
+	}
+
+	// Missing tests:
+	// - Upload the same tarsum file under a different repository and
+	//   ensure the content remains uncorrupted.
+}
+
+func TestManifestAPI(t *testing.T) {
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+		},
+	}
+
+	app := NewApp(config)
+	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+	if err != nil {
+		t.Fatalf("unexpected error creating url builder: %v", err)
+	}
+
+	imageName := "foo/bar"
+	tag := "thetag"
+
+	manifestURL, err := builder.BuildManifestURL(imageName, tag)
+	if err != nil {
+		t.Fatalf("unexpected error getting manifest url: %v", err)
+	}
+
+	// -----------------------------
+	// Attempt to fetch the manifest
+	resp, err := http.Get(manifestURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting manifest: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)
+
+	// TODO(stevvooe): Shoot. The error setup is not working out. The content-
+	// type headers are being set after writing the status code.
+	// if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" {
+	// 	t.Fatalf("unexpected content type: %v != 'application/json'",
+	// 		resp.Header.Get("Content-Type"))
+	// }
+	dec := json.NewDecoder(resp.Body)
+
+	var respErrs v2.Errors
+	if err := dec.Decode(&respErrs); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	if len(respErrs.Errors) == 0 {
+		t.Fatalf("expected errors in response")
+	}
+
+	if respErrs.Errors[0].Code != v2.ErrorCodeManifestUnknown {
+		t.Fatalf("expected manifest unknown error: got %v", respErrs)
+	}
+
+	tagsURL, err := builder.BuildTagsURL(imageName)
+	if err != nil {
+		t.Fatalf("unexpected error building tags url: %v", err)
+	}
+
+	resp, err = http.Get(tagsURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting unknown tags: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Check that we get an unknown repository error when asking for tags
+	checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound)
+	dec = json.NewDecoder(resp.Body)
+	if err := dec.Decode(&respErrs); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	if len(respErrs.Errors) == 0 {
+		t.Fatalf("expected errors in response")
+	}
+
+	if respErrs.Errors[0].Code != v2.ErrorCodeNameUnknown {
+		t.Fatalf("expected repository unknown error: got %v", respErrs)
+	}
+
+	// --------------------------------
+	// Attempt to push unsigned manifest with missing layers
+	unsignedManifest := &manifest.Manifest{
+		Name: imageName,
+		Tag:  tag,
+		FSLayers: []manifest.FSLayer{
+			{
+				BlobSum: "asdf",
+			},
+			{
+				BlobSum: "qwer",
+			},
+		},
+	}
+
+	resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest)
+	defer resp.Body.Close()
+	checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest)
+
+	dec = json.NewDecoder(resp.Body)
+	if err := dec.Decode(&respErrs); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	var unverified int
+	var missingLayers int
+	var invalidDigests int
+
+	for _, err := range respErrs.Errors {
+		switch err.Code {
+		case v2.ErrorCodeManifestUnverified:
+			unverified++
+		case v2.ErrorCodeBlobUnknown:
+			missingLayers++
+		case v2.ErrorCodeDigestInvalid:
+			// TODO(stevvooe): This error isn't quite descriptive enough --
+			// the layer with an invalid digest isn't identified.
+			invalidDigests++
+		default:
+			t.Fatalf("unexpected error: %v", err)
+		}
+	}
+
+	if unverified != 1 {
+		t.Fatalf("should have received one unverified manifest error: %v", respErrs)
+	}
+
+	if missingLayers != 2 {
+		t.Fatalf("should have received two missing layer errors: %v", respErrs)
+	}
+
+	if invalidDigests != 2 {
+		t.Fatalf("should have received two invalid digest errors: %v", respErrs)
+	}
+
+	// TODO(stevvooe): Add a test case where we take a mostly valid registry,
+	// tamper with the content and ensure that we get an unverified manifest
+	// error.
+
+	// Push 2 random layers
+	expectedLayers := make(map[digest.Digest]io.ReadSeeker)
+
+	for i := range unsignedManifest.FSLayers {
+		rs, dgstStr, err := testutil.CreateRandomTarFile()
+
+		if err != nil {
+			t.Fatalf("error creating random layer %d: %v", i, err)
+		}
+		dgst := digest.Digest(dgstStr)
+
+		expectedLayers[dgst] = rs
+		unsignedManifest.FSLayers[i].BlobSum = dgst
+
+		uploadURLBase := startPushLayer(t, builder, imageName)
+		pushLayer(t, builder, imageName, dgst, uploadURLBase, rs)
+	}
+
+	// -------------------
+	// Push the signed manifest with all layers pushed.
+	signedManifest, err := manifest.Sign(unsignedManifest, pk)
+	if err != nil {
+		t.Fatalf("unexpected error signing manifest: %v", err)
+	}
+
+	resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest)
+
+	checkResponse(t, "putting signed manifest", resp, http.StatusOK)
+
+	resp, err = http.Get(manifestURL)
+	if err != nil {
+		t.Fatalf("unexpected error fetching manifest: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK)
+
+	var fetchedManifest manifest.SignedManifest
+	dec = json.NewDecoder(resp.Body)
+	if err := dec.Decode(&fetchedManifest); err != nil {
+		t.Fatalf("error decoding fetched manifest: %v", err)
+	}
+
+	if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) {
+		t.Fatalf("manifests do not match")
+	}
+
+	// Ensure that the tag is listed.
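+	// (This reuses tagsURL from the unknown-tags check above; the same
+	// endpoint that returned 404 earlier should now return 200 OK.)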
+	resp, err = http.Get(tagsURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting tags: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Check that we get a valid tags listing now that a manifest exists
+	checkResponse(t, "getting manifest tags", resp, http.StatusOK)
+	dec = json.NewDecoder(resp.Body)
+
+	var tagsResponse tagsAPIResponse
+
+	if err := dec.Decode(&tagsResponse); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	if tagsResponse.Name != imageName {
+		t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName)
+	}
+
+	if len(tagsResponse.Tags) != 1 {
+		t.Fatalf("expected some tags in response: %v", tagsResponse.Tags)
+	}
+
+	if tagsResponse.Tags[0] != tag {
+		t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag)
+	}
+}
+
+func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response {
+	var body []byte
+	if sm, ok := v.(*manifest.SignedManifest); ok {
+		body = sm.Raw
+	} else {
+		var err error
+		body, err = json.MarshalIndent(v, "", "   ")
+		if err != nil {
+			t.Fatalf("unexpected error marshaling %v: %v", v, err)
+		}
+	}
+
+	req, err := http.NewRequest("PUT", url, bytes.NewReader(body))
+	if err != nil {
+		t.Fatalf("error creating request for %s: %v", msg, err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("error doing put request while %s: %v", msg, err)
+	}
+
+	return resp
+}
+
+func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string {
+	layerUploadURL, err := ub.BuildBlobUploadURL(name)
+	if err != nil {
+		t.Fatalf("unexpected error building layer upload url: %v", err)
+	}
+
+	resp, err := http.Post(layerUploadURL, "", nil)
+	if err != nil {
+		t.Fatalf("unexpected error starting layer push: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, fmt.Sprintf("starting layer push to %v", name), resp, http.StatusAccepted)
+	checkHeaders(t, resp, http.Header{
+		"Location":       []string{"*"},
+		"Content-Length": []string{"0"},
+	})
+
+	return resp.Header.Get("Location")
+}
+
+// pushLayer pushes the layer content returning the url on success.
+func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string {
+	rsLength, _ := rs.Seek(0, os.SEEK_END)
+	rs.Seek(0, os.SEEK_SET)
+
+	u, err := url.Parse(uploadURLBase)
+	if err != nil {
+		t.Fatalf("unexpected error parsing pushLayer url: %v", err)
+	}
+
+	u.RawQuery = url.Values{
+		"_state": u.Query()["_state"],
+
+		"digest": []string{dgst.String()},
+
+		// TODO(stevvooe): Layer upload can be completed with and without size
+		// argument. We'll need to add a test that checks the latter path.
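+		// Note that the size parameter is optional on the server side:
+		// maybeCompleteUpload treats a missing size as -1 and relies on the
+		// digest to validate the upload.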
+		"size": []string{fmt.Sprint(rsLength)},
+	}.Encode()
+
+	uploadURL := u.String()
+
+	// Just do a monolithic upload
+	req, err := http.NewRequest("PUT", uploadURL, rs)
+	if err != nil {
+		t.Fatalf("unexpected error creating new request: %v", err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("unexpected error doing put: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)
+
+	expectedLayerURL, err := ub.BuildBlobURL(name, dgst)
+	if err != nil {
+		t.Fatalf("error building expected layer url: %v", err)
+	}
+
+	checkHeaders(t, resp, http.Header{
+		"Location":       []string{expectedLayerURL},
+		"Content-Length": []string{"0"},
+	})
+
+	return resp.Header.Get("Location")
+}
+
+func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) {
+	if resp.StatusCode != expectedStatus {
+		t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus)
+		maybeDumpResponse(t, resp)
+
+		t.FailNow()
+	}
+}
+
+func maybeDumpResponse(t *testing.T, resp *http.Response) {
+	if d, err := httputil.DumpResponse(resp, true); err != nil {
+		t.Logf("error dumping response: %v", err)
+	} else {
+		t.Logf("response:\n%s", string(d))
+	}
+}
+
+// checkHeaders checks that the response has at least the headers. If not, the
+// test will fail. If a passed in header value is "*", any non-zero value will
+// suffice as a match.
+func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
+	for k, vs := range headers {
+		if resp.Header.Get(k) == "" {
+			t.Fatalf("response missing header %q", k)
+		}
+
+		for _, v := range vs {
+			if v == "*" {
+				// Just ensure there is some value.
+				if len(resp.Header[k]) > 0 {
+					continue
+				}
+			}
+
+			for _, hv := range resp.Header[k] {
+				if hv != v {
+					t.Fatalf("header value not matched in response: %q != %q", hv, v)
+				}
+			}
+		}
+	}
+}
diff --git a/docs/app.go b/docs/app.go
new file mode 100644
index 00000000..fefeb084
--- /dev/null
+++ b/docs/app.go
@@ -0,0 +1,263 @@
+package registry
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/auth"
+	"github.com/docker/distribution/configuration"
+	"github.com/docker/distribution/storage"
+	"github.com/docker/distribution/storagedriver"
+	"github.com/docker/distribution/storagedriver/factory"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/gorilla/mux"
+)
+
+// App is a global registry application object. Shared resources can be placed
+// on this object that will be accessible from all requests. Any writable
+// fields should be protected.
+type App struct {
+	Config configuration.Configuration
+
+	router *mux.Router
+
+	// driver maintains the app global storage driver instance.
+	driver storagedriver.StorageDriver
+
+	// services contains the main services instance for the application.
+	services *storage.Services
+
+	tokenProvider tokenProvider
+
+	accessController auth.AccessController
+}
+
+// NewApp takes a configuration and returns a configured app, ready to serve
+// requests. The app only implements ServeHTTP and can be wrapped in other
+// handlers accordingly.
+func NewApp(configuration configuration.Configuration) *App {
+	app := &App{
+		Config: configuration,
+		router: v2.Router(),
+	}
+
+	// Register the handler dispatchers.
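+	// Each route name is bound to a dispatchFunc that constructs a fresh,
+	// request-scoped handler; see (*App).dispatcher below.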
+	app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
+		return http.HandlerFunc(apiBase)
+	})
+	app.register(v2.RouteNameManifest, imageManifestDispatcher)
+	app.register(v2.RouteNameTags, tagsDispatcher)
+	app.register(v2.RouteNameBlob, layerDispatcher)
+	app.register(v2.RouteNameBlobUpload, layerUploadDispatcher)
+	app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher)
+
+	driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters())
+
+	if err != nil {
+		// TODO(stevvooe): Move the creation of a service into a protected
+		// method, where this is created lazily. Its status can be queried via
+		// a health check.
+		panic(err)
+	}
+
+	app.driver = driver
+	app.services = storage.NewServices(app.driver)
+	app.tokenProvider = newHMACTokenProvider(configuration.HTTP.Secret)
+
+	authType := configuration.Auth.Type()
+
+	if authType != "" {
+		accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters())
+		if err != nil {
+			panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err))
+		}
+		app.accessController = accessController
+	}
+
+	return app
+}
+
+func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	app.router.ServeHTTP(w, r)
+}
+
+// register a handler with the application, by route name. The handler will be
+// passed through the application filters and context will be constructed at
+// request time.
+func (app *App) register(routeName string, dispatch dispatchFunc) {
+
+	// TODO(stevvooe): This odd dispatcher/route registration is a by-product
+	// of some limitations in the gorilla/mux router. We are using it to keep
+	// routing consistent between the client and server, but we may want to
+	// replace it with manual routing and structure-based dispatch for better
+	// control over the request execution.
+
+	app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch))
+}
+
+// dispatchFunc takes a context and request and returns a constructed handler
+// for the route. The dispatcher will use this to dynamically create request
+// specific handlers for each endpoint without creating a new router for each
+// request.
+type dispatchFunc func(ctx *Context, r *http.Request) http.Handler
+
+// TODO(stevvooe): dispatchers should probably have some validation error
+// chain with proper error reporting.
+
+// singleStatusResponseWriter treats the first status written as the valid
+// request status, ignoring any subsequent writes. The current use case of
+// this type should be factored out.
+type singleStatusResponseWriter struct {
+	http.ResponseWriter
+	status int
+}
+
+func (ssrw *singleStatusResponseWriter) WriteHeader(status int) {
+	if ssrw.status != 0 {
+		return
+	}
+	ssrw.status = status
+	ssrw.ResponseWriter.WriteHeader(status)
+}
+
+// dispatcher returns a handler that constructs a request specific context and
+// handler, using the dispatch factory function.
+func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		context := app.context(r)
+
+		if err := app.authorized(w, r, context); err != nil {
+			return
+		}
+
+		context.log = log.WithField("name", context.Name)
+		handler := dispatch(context, r)
+
+		ssrw := &singleStatusResponseWriter{ResponseWriter: w}
+		context.log.Infoln("handler", resolveHandlerName(r.Method, handler))
+		handler.ServeHTTP(ssrw, r)
+
+		// Automated error response handling here. Handlers may return their
+		// own errors if they need different behavior (such as range errors
+		// for layer upload).
+		if context.Errors.Len() > 0 {
+			if ssrw.status == 0 {
+				w.WriteHeader(http.StatusBadRequest)
+			}
+			serveJSON(w, context.Errors)
+		}
+	})
+}
+
+// context constructs the context object for the application. This should
+// only be called once per request.
+func (app *App) context(r *http.Request) *Context {
+	vars := mux.Vars(r)
+	context := &Context{
+		App:        app,
+		Name:       vars["name"],
+		urlBuilder: v2.NewURLBuilderFromRequest(r),
+	}
+
+	// Store vars for underlying handlers.
+	context.vars = vars
+
+	return context
+}
+
+// authorized checks if the request can proceed with the requested access
+// level. If it cannot, the method will return an error.
+func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
+	if app.accessController == nil {
+		return nil // access controller is not enabled.
+	}
+
+	var accessRecords []auth.Access
+
+	if context.Name != "" {
+		resource := auth.Resource{
+			Type: "repository",
+			Name: context.Name,
+		}
+
+		switch r.Method {
+		case "GET", "HEAD":
+			accessRecords = append(accessRecords,
+				auth.Access{
+					Resource: resource,
+					Action:   "pull",
+				})
+		case "POST", "PUT", "PATCH":
+			accessRecords = append(accessRecords,
+				auth.Access{
+					Resource: resource,
+					Action:   "pull",
+				},
+				auth.Access{
+					Resource: resource,
+					Action:   "push",
+				})
+		case "DELETE":
+			// DELETE access requires full admin rights, which is represented
+			// as "*". This may not be ideal.
+			accessRecords = append(accessRecords,
+				auth.Access{
+					Resource: resource,
+					Action:   "*",
+				})
+		}
+	} else {
+		// Only allow the name not to be set on the base route.
+		route := mux.CurrentRoute(r)
+
+		if route == nil || route.GetName() != v2.RouteNameBase {
+			// For this to be properly secured, context.Name must always be set
+			// for a resource that may make a modification. The only condition
+			// under which name is not set and we still allow access is when the
+			// base route is accessed. This section prevents us from making that
+			// mistake elsewhere in the code, allowing any operation to proceed.
+			w.Header().Set("Content-Type", "application/json; charset=utf-8")
+			w.WriteHeader(http.StatusForbidden)
+
+			var errs v2.Errors
+			errs.Push(v2.ErrorCodeUnauthorized)
+			serveJSON(w, errs)
+		}
+	}
+
+	if err := app.accessController.Authorized(r, accessRecords...); err != nil {
+		switch err := err.(type) {
+		case auth.Challenge:
+			w.Header().Set("Content-Type", "application/json; charset=utf-8")
+			err.ServeHTTP(w, r)
+
+			var errs v2.Errors
+			errs.Push(v2.ErrorCodeUnauthorized, accessRecords)
+			serveJSON(w, errs)
+		default:
+			// This condition is a potential security problem either in
+			// the configuration or whatever is backing the access
+			// controller. Just return a bad request with no information
+			// to avoid exposure. The request should not proceed.
+			context.log.Errorf("error checking authorization: %v", err)
+			w.WriteHeader(http.StatusBadRequest)
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+// apiBase implements a simple yes-man for doing overall checks against the
+// api. This can support auth roundtrips to support docker login.
+func apiBase(w http.ResponseWriter, r *http.Request) {
+	const emptyJSON = "{}"
+	// Provide a simple /v2/ 200 OK response with empty json response.
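+	// Clients probe this endpoint to check for v2 API support, so an empty
+	// JSON document is returned rather than a bare 200.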
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON)))
+
+	fmt.Fprint(w, emptyJSON)
+}
diff --git a/docs/app_test.go b/docs/app_test.go
new file mode 100644
index 00000000..4d9535f7
--- /dev/null
+++ b/docs/app_test.go
@@ -0,0 +1,194 @@
+package registry
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"github.com/docker/distribution/api/v2"
+	_ "github.com/docker/distribution/auth/silly"
+	"github.com/docker/distribution/configuration"
+)
+
+// TestAppDispatcher builds an application with a test dispatcher and ensures
+// that requests are properly dispatched and the handlers are constructed.
+// This only tests the dispatch mechanism. The underlying dispatchers must be
+// tested individually.
+func TestAppDispatcher(t *testing.T) {
+	app := &App{
+		Config: configuration.Configuration{},
+		router: v2.Router(),
+	}
+	server := httptest.NewServer(app)
+	router := v2.Router()
+
+	serverURL, err := url.Parse(server.URL)
+	if err != nil {
+		t.Fatalf("error parsing server url: %v", err)
+	}
+
+	varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc {
+		return func(ctx *Context, r *http.Request) http.Handler {
+			// Always checks the same name context
+			if ctx.Name != ctx.vars["name"] {
+				t.Fatalf("unexpected name: %q != %q", ctx.Name, "foo/bar")
+			}
+
+			// Check that we have all that is expected
+			for expectedK, expectedV := range expectedVars {
+				if ctx.vars[expectedK] != expectedV {
+					t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.vars[expectedK], expectedV)
+				}
+			}
+
+			// Check that we only have variables that are expected
+			for k, v := range ctx.vars {
+				_, ok := expectedVars[k]
+
+				if !ok { // name is checked on context
+					// We have an unexpected key, fail
+					t.Fatalf("unexpected key %q in vars with value %q", k, v)
+				}
+			}
+
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusOK)
+			})
+		}
+	}
+
+	// unflatten a list of variables, suitable for gorilla/mux, to a map[string]string
+	unflatten := func(vars []string) map[string]string {
+		m := make(map[string]string)
+		for i := 0; i < len(vars)-1; i = i + 2 {
+			m[vars[i]] = vars[i+1]
+		}
+
+		return m
+	}
+
+	for _, testcase := range []struct {
+		endpoint string
+		vars     []string
+	}{
+		{
+			endpoint: v2.RouteNameManifest,
+			vars: []string{
+				"name", "foo/bar",
+				"tag", "sometag",
+			},
+		},
+		{
+			endpoint: v2.RouteNameTags,
+			vars: []string{
+				"name", "foo/bar",
+			},
+		},
+		{
+			endpoint: v2.RouteNameBlob,
+			vars: []string{
+				"name", "foo/bar",
+				"digest", "tarsum.v1+bogus:abcdef0123456789",
+			},
+		},
+		{
+			endpoint: v2.RouteNameBlobUpload,
+			vars: []string{
+				"name", "foo/bar",
+			},
+		},
+		{
+			endpoint: v2.RouteNameBlobUploadChunk,
+			vars: []string{
+				"name", "foo/bar",
+				"uuid", "theuuid",
+			},
+		},
+	} {
+		app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
+		route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
+		u, err := route.URL(testcase.vars...)
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		resp, err := http.Get(u.String())
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK)
+		}
+	}
+}
+
+// TestNewApp covers the creation of an application via NewApp with a
+// configuration.
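+// It also exercises the configured "silly" access controller, expecting an
+// unauthorized response with a populated challenge header on the base route.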
+func TestNewApp(t *testing.T) {
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": nil,
+		},
+		Auth: configuration.Auth{
+			// For now, we simply test that new auth results in a viable
+			// application.
+			"silly": {
+				"realm":   "realm-test",
+				"service": "service-test",
+			},
+		},
+	}
+
+	// Mostly, with this test, given a sane configuration, we are simply
+	// ensuring that NewApp doesn't panic. We might want to tweak this
+	// behavior.
+	app := NewApp(config)
+
+	server := httptest.NewServer(app)
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+	if err != nil {
+		t.Fatalf("error creating urlbuilder: %v", err)
+	}
+
+	baseURL, err := builder.BuildBaseURL()
+	if err != nil {
+		t.Fatalf("error creating baseURL: %v", err)
+	}
+
+	// TODO(stevvooe): The rest of this test might belong in the API tests.
+
+	// Just hit the app and make sure we get a 401 Unauthorized error.
+	resp, err := http.Get(baseURL)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected status code during request: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" {
+		t.Fatalf("unexpected content-type: %v != %v", resp.Header.Get("Content-Type"), "application/json; charset=utf-8")
+	}
+
+	expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\""
+	if resp.Header.Get("Authorization") != expectedAuthHeader {
+		t.Fatalf("unexpected authorization header: %q != %q", resp.Header.Get("Authorization"), expectedAuthHeader)
+	}
+
+	var errs v2.Errors
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&errs); err != nil {
+		t.Fatalf("error decoding error response: %v", err)
+	}
+
+	if errs.Errors[0].Code != v2.ErrorCodeUnauthorized {
+		t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized)
+	}
+}
diff --git a/docs/context.go b/docs/context.go
new file mode 100644
index 00000000..88193cda
--- /dev/null
+++ b/docs/context.go
@@ -0,0 +1,32 @@
+package registry
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/api/v2"
+)
+
+// Context should contain the request specific context for use across
+// handlers. Resources that don't need to be shared across handlers should not
+// be on this object.
+type Context struct {
+	// App points to the application structure that created this context.
+	*App
+
+	// Name is the prefix for the current request. Corresponds to the
+	// namespace/repository associated with the image.
+	Name string
+
+	// Errors is a collection of errors encountered during the request to be
+	// returned to the client API. If errors are added to the collection, the
+	// handler *must not* start the response via http.ResponseWriter.
+	Errors v2.Errors
+
+	// vars contains the extracted gorilla/mux variables that can be used for
+	// assignment.
+	vars map[string]string
+
+	// log provides a context specific logger.
+	log *logrus.Entry
+
+	urlBuilder *v2.URLBuilder
+}
diff --git a/docs/helpers.go b/docs/helpers.go
new file mode 100644
index 00000000..6bcb4ae8
--- /dev/null
+++ b/docs/helpers.go
@@ -0,0 +1,32 @@
+package registry
+
+import (
+	"encoding/json"
+	"io"
+	"net/http"
+)
+
+// serveJSON marshals v and sets the content-type header to
+// 'application/json'. If a different status code is required, call
+// ResponseWriter.WriteHeader before this function.
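+// Handlers in this package typically pair it with a v2.Errors collection
+// after writing an error status code.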
+func serveJSON(w http.ResponseWriter, v interface{}) error {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	enc := json.NewEncoder(w)
+
+	if err := enc.Encode(v); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// closeResources closes all the provided resources after running the target
+// handler.
+func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		for _, closer := range closers {
+			defer closer.Close()
+		}
+		handler.ServeHTTP(w, r)
+	})
+}
diff --git a/docs/images.go b/docs/images.go
new file mode 100644
index 00000000..a6b55859
--- /dev/null
+++ b/docs/images.go
@@ -0,0 +1,114 @@
+package registry
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/storage"
+	"github.com/gorilla/handlers"
+)
+
+// imageManifestDispatcher takes the request context and builds the
+// appropriate handler for handling image manifest requests.
+func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {
+	imageManifestHandler := &imageManifestHandler{
+		Context: ctx,
+		Tag:     ctx.vars["tag"],
+	}
+
+	imageManifestHandler.log = imageManifestHandler.log.WithField("tag", imageManifestHandler.Tag)
+
+	return handlers.MethodHandler{
+		"GET":    http.HandlerFunc(imageManifestHandler.GetImageManifest),
+		"PUT":    http.HandlerFunc(imageManifestHandler.PutImageManifest),
+		"DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest),
+	}
+}
+
+// imageManifestHandler handles http operations on image manifests.
+type imageManifestHandler struct {
+	*Context
+
+	Tag string
+}
+
+// GetImageManifest fetches the image manifest from the storage backend, if it exists.
+func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
+	manifests := imh.services.Manifests()
+	manifest, err := manifests.Get(imh.Name, imh.Tag)
+
+	if err != nil {
+		imh.Errors.Push(v2.ErrorCodeManifestUnknown, err)
+		w.WriteHeader(http.StatusNotFound)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.Header().Set("Content-Length", fmt.Sprint(len(manifest.Raw)))
+	w.Write(manifest.Raw)
+}
+
+// PutImageManifest validates and stores an image manifest in the registry.
+func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
+	manifests := imh.services.Manifests()
+	dec := json.NewDecoder(r.Body)
+
+	var manifest manifest.SignedManifest
+	if err := dec.Decode(&manifest); err != nil {
+		imh.Errors.Push(v2.ErrorCodeManifestInvalid, err)
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	if err := manifests.Put(imh.Name, imh.Tag, &manifest); err != nil {
+		// TODO(stevvooe): These error handling switches really need to be
+		// handled by an app global mapper.
+		switch err := err.(type) {
+		case storage.ErrManifestVerification:
+			for _, verificationError := range err {
+				switch verificationError := verificationError.(type) {
+				case storage.ErrUnknownLayer:
+					imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer)
+				case storage.ErrManifestUnverified:
+					imh.Errors.Push(v2.ErrorCodeManifestUnverified)
+				default:
+					if verificationError == digest.ErrDigestInvalidFormat {
+						// TODO(stevvooe): We really need to move all
+						// errors to types. It's much more straightforward.
+						imh.Errors.Push(v2.ErrorCodeDigestInvalid)
+					} else {
+						imh.Errors.PushErr(verificationError)
+					}
+				}
+			}
+		default:
+			imh.Errors.PushErr(err)
+		}
+
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+}
+
+// DeleteImageManifest removes the image with the given tag from the registry.
+func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
+	manifests := imh.services.Manifests()
+	if err := manifests.Delete(imh.Name, imh.Tag); err != nil {
+		switch err := err.(type) {
+		case storage.ErrUnknownManifest:
+			imh.Errors.Push(v2.ErrorCodeManifestUnknown, err)
+			w.WriteHeader(http.StatusNotFound)
+		default:
+			imh.Errors.Push(v2.ErrorCodeUnknown, err)
+			w.WriteHeader(http.StatusBadRequest)
+		}
+		return
+	}
+
+	w.Header().Set("Content-Length", "0")
+	w.WriteHeader(http.StatusAccepted)
+}
diff --git a/docs/layer.go b/docs/layer.go
new file mode 100644
index 00000000..a7c46c31
--- /dev/null
+++ b/docs/layer.go
@@ -0,0 +1,62 @@
+package registry
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/storage"
+	"github.com/gorilla/handlers"
+)
+
+// layerDispatcher uses the request context to build a layerHandler.
+func layerDispatcher(ctx *Context, r *http.Request) http.Handler {
+	dgst, err := digest.ParseDigest(ctx.vars["digest"])
+
+	if err != nil {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err)
+		})
+	}
+
+	layerHandler := &layerHandler{
+		Context: ctx,
+		Digest:  dgst,
+	}
+
+	layerHandler.log = layerHandler.log.WithField("digest", dgst)
+
+	return handlers.MethodHandler{
+		"GET":  http.HandlerFunc(layerHandler.GetLayer),
+		"HEAD": http.HandlerFunc(layerHandler.GetLayer),
+	}
+}
+
+// layerHandler serves http layer requests.
+type layerHandler struct {
+	*Context
+
+	Digest digest.Digest
+}
+
+// GetLayer fetches the binary data from backend storage and returns it in the
+// response.
+func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
+	layers := lh.services.Layers()
+
+	layer, err := layers.Fetch(lh.Name, lh.Digest)
+
+	if err != nil {
+		switch err := err.(type) {
+		case storage.ErrUnknownLayer:
+			w.WriteHeader(http.StatusNotFound)
+			lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer)
+		default:
+			lh.Errors.Push(v2.ErrorCodeUnknown, err)
+		}
+		return
+	}
+	defer layer.Close()
+
+	http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer)
+}
diff --git a/docs/layerupload.go b/docs/layerupload.go
new file mode 100644
index 00000000..b694a677
--- /dev/null
+++ b/docs/layerupload.go
@@ -0,0 +1,245 @@
+package registry
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/storage"
+	"github.com/gorilla/handlers"
+)
+
+// layerUploadDispatcher constructs and returns the layer upload handler for
+// the given request context.
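+// POST starts a new upload, GET/HEAD report upload status, PUT writes a
+// chunk (possibly completing the upload) and DELETE cancels it.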
+func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
+	luh := &layerUploadHandler{
+		Context: ctx,
+		UUID:    ctx.vars["uuid"],
+	}
+
+	handler := http.Handler(handlers.MethodHandler{
+		"POST":   http.HandlerFunc(luh.StartLayerUpload),
+		"GET":    http.HandlerFunc(luh.GetUploadStatus),
+		"HEAD":   http.HandlerFunc(luh.GetUploadStatus),
+		"PUT":    http.HandlerFunc(luh.PutLayerChunk),
+		"DELETE": http.HandlerFunc(luh.CancelLayerUpload),
+	})
+
+	if luh.UUID != "" {
+		luh.log = luh.log.WithField("uuid", luh.UUID)
+
+		state, err := ctx.tokenProvider.layerUploadStateFromToken(r.FormValue("_state"))
+		if err != nil {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				logrus.Infof("error resolving upload: %v", err)
+				w.WriteHeader(http.StatusInternalServerError)
+				luh.Errors.Push(v2.ErrorCodeUnknown, err)
+			})
+		}
+
+		layers := ctx.services.Layers()
+		upload, err := layers.Resume(state)
+		if err != nil && err != storage.ErrLayerUploadUnknown {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				logrus.Infof("error resolving upload: %v", err)
+				w.WriteHeader(http.StatusInternalServerError)
+				luh.Errors.Push(v2.ErrorCodeUnknown, err)
+			})
+		}
+
+		luh.Upload = upload
+		handler = closeResources(handler, luh.Upload)
+	}
+
+	return handler
+}
+
+// layerUploadHandler handles the http layer upload process.
+type layerUploadHandler struct {
+	*Context
+
+	// UUID identifies the upload instance for the current request.
+	UUID string
+
+	Upload storage.LayerUpload
+}
+
+// StartLayerUpload begins the layer upload process and allocates a server-
+// side upload session.
+func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) {
+	layers := luh.services.Layers()
+	upload, err := layers.Upload(luh.Name)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
+		luh.Errors.Push(v2.ErrorCodeUnknown, err)
+		return
+	}
+
+	luh.Upload = upload
+	defer luh.Upload.Close()
+
+	if err := luh.layerUploadResponse(w, r); err != nil {
+		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
+		luh.Errors.Push(v2.ErrorCodeUnknown, err)
+		return
+	}
+	w.WriteHeader(http.StatusAccepted)
+}
+
+// GetUploadStatus returns the status of a given upload, identified by uuid.
+func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
+	if luh.Upload == nil {
+		w.WriteHeader(http.StatusNotFound)
+		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
+		return
+	}
+
+	if err := luh.layerUploadResponse(w, r); err != nil {
+		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
+		luh.Errors.Push(v2.ErrorCodeUnknown, err)
+		return
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// PutLayerChunk receives a layer chunk during the layer upload process,
+// possibly completing the upload with a checksum and length.
+func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) {
+	if luh.Upload == nil {
+		w.WriteHeader(http.StatusNotFound)
+		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
+		return
+	}
+
+	var finished bool
+
+	// TODO(stevvooe): This is woefully incomplete. Missing stuff:
+	//
+	// 1. Extract information from range header, if present.
+	// 2. Check offset of current layer.
+	// 3. Emit correct error responses.
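+	// Note that the error from the io.Copy below goes unchecked; a short or
+	// failed copy should surface as a digest or size mismatch when the
+	// upload is completed.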
+	// Read in the chunk
+	io.Copy(luh.Upload, r.Body)
+
+	if err := luh.maybeCompleteUpload(w, r); err != nil {
+		if err != errNotReadyToComplete {
+			switch err := err.(type) {
+			case storage.ErrLayerInvalidSize:
+				w.WriteHeader(http.StatusBadRequest)
+				luh.Errors.Push(v2.ErrorCodeSizeInvalid, err)
+				return
+			case storage.ErrLayerInvalidDigest:
+				w.WriteHeader(http.StatusBadRequest)
+				luh.Errors.Push(v2.ErrorCodeDigestInvalid, err)
+				return
+			default:
+				w.WriteHeader(http.StatusInternalServerError)
+				luh.Errors.Push(v2.ErrorCodeUnknown, err)
+				return
+			}
+		}
+	}
+
+	if err := luh.layerUploadResponse(w, r); err != nil {
+		w.WriteHeader(http.StatusInternalServerError) // Error conditions here?
+		luh.Errors.Push(v2.ErrorCodeUnknown, err)
+		return
+	}
+
+	if finished {
+		w.WriteHeader(http.StatusCreated)
+	} else {
+		w.WriteHeader(http.StatusAccepted)
+	}
+}
+
+// CancelLayerUpload cancels an in-progress upload of a layer.
+func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) {
+	if luh.Upload == nil {
+		w.WriteHeader(http.StatusNotFound)
+		luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown)
+		return
+	}
+
+}
+
+// layerUploadResponse sets the standard headers for layer upload and chunk
+// status responses. The response status itself is left to the caller.
+func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error {
+	values := make(url.Values)
+	stateToken, err := luh.Context.tokenProvider.layerUploadStateToToken(storage.LayerUploadState{Name: luh.Upload.Name(), UUID: luh.Upload.UUID(), Offset: luh.Upload.Offset()})
+	if err != nil {
+		logrus.Infof("error building upload state token: %s", err)
+		return err
+	}
+	values.Set("_state", stateToken)
+	uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(luh.Upload.Name(), luh.Upload.UUID(), values)
+	if err != nil {
+		logrus.Infof("error building upload url: %s", err)
+		return err
+	}
+
+	w.Header().Set("Location", uploadURL)
+	w.Header().Set("Content-Length", "0")
+	w.Header().Set("Range", fmt.Sprintf("0-%d", luh.Upload.Offset()))
+
+	return nil
+}
+
+var errNotReadyToComplete = fmt.Errorf("not ready to complete upload")
+
+// maybeCompleteUpload tries to complete the upload if the correct parameters
+// are available. Returns errNotReadyToComplete if not ready to complete.
+func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error {
+	// If we get a digest and length, we can finish the upload.
+	dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
+	sizeStr := r.FormValue("size")
+
+	if dgstStr == "" {
+		return errNotReadyToComplete
+	}
+
+	dgst, err := digest.ParseDigest(dgstStr)
+	if err != nil {
+		return err
+	}
+
+	var size int64
+	if sizeStr != "" {
+		size, err = strconv.ParseInt(sizeStr, 10, 64)
+		if err != nil {
+			return err
+		}
+	} else {
+		size = -1
+	}
+
+	luh.completeUpload(w, r, size, dgst)
+	return nil
+}
+
+// completeUpload finishes out the upload with the correct response.
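+// On success, it responds 201 Created with a Location header pointing at
+// the canonical blob URL for the finished layer.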
+func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) {
+	layer, err := luh.Upload.Finish(size, dgst)
+	if err != nil {
+		luh.Errors.Push(v2.ErrorCodeUnknown, err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest())
+	if err != nil {
+		luh.Errors.Push(v2.ErrorCodeUnknown, err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Location", layerURL)
+	w.Header().Set("Content-Length", "0")
+	w.WriteHeader(http.StatusCreated)
+}
diff --git a/docs/tags.go b/docs/tags.go
new file mode 100644
index 00000000..18f6add2
--- /dev/null
+++ b/docs/tags.go
@@ -0,0 +1,60 @@
+package registry
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/storage"
+	"github.com/gorilla/handlers"
+)
+
+// tagsDispatcher constructs the tags handler api endpoint.
+func tagsDispatcher(ctx *Context, r *http.Request) http.Handler {
+	tagsHandler := &tagsHandler{
+		Context: ctx,
+	}
+
+	return handlers.MethodHandler{
+		"GET": http.HandlerFunc(tagsHandler.GetTags),
+	}
+}
+
+// tagsHandler handles requests for lists of tags under a repository name.
+type tagsHandler struct {
+	*Context
+}
+
+type tagsAPIResponse struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
+
+// GetTags returns a json list of tags for a specific image name.
+func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
+	defer r.Body.Close()
+	manifests := th.services.Manifests()
+
+	tags, err := manifests.Tags(th.Name)
+	if err != nil {
+		switch err := err.(type) {
+		case storage.ErrUnknownRepository:
+			w.WriteHeader(http.StatusNotFound)
+			th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Name})
+		default:
+			th.Errors.PushErr(err)
+		}
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(tagsAPIResponse{
+		Name: th.Name,
+		Tags: tags,
+	}); err != nil {
+		th.Errors.PushErr(err)
+		return
+	}
+}
diff --git a/docs/tokens.go b/docs/tokens.go
new file mode 100644
index 00000000..276b896e
--- /dev/null
+++ b/docs/tokens.go
@@ -0,0 +1,65 @@
+package registry
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/distribution/storage"
+)
+
+// tokenProvider contains methods for serializing and deserializing state from token strings.
+type tokenProvider interface {
+	// layerUploadStateFromToken retrieves the LayerUploadState for a given state token.
+	layerUploadStateFromToken(stateToken string) (storage.LayerUploadState, error)
+
+	// layerUploadStateToToken returns a token string representing the given LayerUploadState.
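+	// The token is round-tripped through the client in the "_state" query
+	// parameter, keeping upload sessions stateless on the server side.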
+	layerUploadStateToToken(layerUploadState storage.LayerUploadState) (string, error)
+}
+
+type hmacTokenProvider struct {
+	secret string
+}
+
+func newHMACTokenProvider(secret string) tokenProvider {
+	return &hmacTokenProvider{secret: secret}
+}
+
+// layerUploadStateFromToken deserializes the given HMAC stateToken and validates the prefix HMAC
+func (ts *hmacTokenProvider) layerUploadStateFromToken(stateToken string) (storage.LayerUploadState, error) {
+	var lus storage.LayerUploadState
+
+	tokenBytes, err := base64.URLEncoding.DecodeString(stateToken)
+	if err != nil {
+		return lus, err
+	}
+	mac := hmac.New(sha256.New, []byte(ts.secret))
+
+	if len(tokenBytes) < mac.Size() {
+		return lus, fmt.Errorf("Invalid token")
+	}
+
+	macBytes := tokenBytes[:mac.Size()]
+	messageBytes := tokenBytes[mac.Size():]
+
+	mac.Write(messageBytes)
+	if !hmac.Equal(mac.Sum(nil), macBytes) {
+		return lus, fmt.Errorf("Invalid token")
+	}
+
+	if err := json.Unmarshal(messageBytes, &lus); err != nil {
+		return lus, err
+	}
+
+	return lus, nil
+}
+
+// layerUploadStateToToken serializes the given LayerUploadState to JSON with an HMAC prepended
+func (ts *hmacTokenProvider) layerUploadStateToToken(lus storage.LayerUploadState) (string, error) {
+	mac := hmac.New(sha256.New, []byte(ts.secret))
+	stateJSON := fmt.Sprintf("{\"Name\": \"%s\", \"UUID\": \"%s\", \"Offset\": %d}", lus.Name, lus.UUID, lus.Offset)
+	mac.Write([]byte(stateJSON))
+	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), stateJSON...)), nil
+}
diff --git a/docs/tokens_test.go b/docs/tokens_test.go
new file mode 100644
index 00000000..a447438a
--- /dev/null
+++ b/docs/tokens_test.go
@@ -0,0 +1,121 @@
+package registry
+
+import (
+	"testing"
+
+	"github.com/docker/distribution/storage"
+)
+
+var layerUploadStates = []storage.LayerUploadState{
+	{
+		Name:   "hello",
+		UUID:   "abcd-1234-qwer-0987",
+		Offset: 0,
+	},
+	{
+		Name:   "hello-world",
+		UUID:   "abcd-1234-qwer-0987",
+		Offset: 0,
+	},
+	{
+		Name:   "h3ll0_w0rld",
+		UUID:   "abcd-1234-qwer-0987",
+		Offset: 1337,
+	},
+	{
+		Name:   "ABCDEFG",
+		UUID:   "ABCD-1234-QWER-0987",
+		Offset: 1234567890,
+	},
+	{
+		Name:   "this-is-A-sort-of-Long-name-for-Testing",
+		UUID:   "dead-1234-beef-0987",
+		Offset: 8675309,
+	},
+}
+
+var secrets = []string{
+	"supersecret",
+	"12345",
+	"a",
+	"SuperSecret",
+	"Sup3r... S3cr3t!",
+	"This is a reasonably long secret key that is used for the purpose of testing.",
+	"\u2603+\u2744", // snowman+snowflake
+}
+
+// TestLayerUploadTokens constructs stateTokens from LayerUploadStates and
+// validates that the tokens can be used to reconstruct the proper upload state.
+func TestLayerUploadTokens(t *testing.T) {
+	tokenProvider := newHMACTokenProvider("supersecret")
+
+	for _, testcase := range layerUploadStates {
+		token, err := tokenProvider.layerUploadStateToToken(testcase)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		lus, err := tokenProvider.layerUploadStateFromToken(token)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		assertLayerUploadStateEquals(t, testcase, lus)
+	}
+}
+
+// TestHMACValidation ensures that any HMAC token providers are compatible if and
+// only if they share the same secret.
+func TestHMACValidation(t *testing.T) {
+	for _, secret := range secrets {
+		tokenProvider1 := newHMACTokenProvider(secret)
+		tokenProvider2 := newHMACTokenProvider(secret)
+		badTokenProvider := newHMACTokenProvider("DifferentSecret")
+
+		for _, testcase := range layerUploadStates {
+			token, err := tokenProvider1.layerUploadStateToToken(testcase)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			lus, err := tokenProvider2.layerUploadStateFromToken(token)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			assertLayerUploadStateEquals(t, testcase, lus)
+
+			_, err = badTokenProvider.layerUploadStateFromToken(token)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token)
+			}
+
+			badToken, err := badTokenProvider.layerUploadStateToToken(testcase)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			_, err = tokenProvider1.layerUploadStateFromToken(badToken)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
+			}
+
+			_, err = tokenProvider2.layerUploadStateFromToken(badToken)
+			if err == nil {
+				t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken)
+			}
+		}
+	}
+}
+
+func assertLayerUploadStateEquals(t *testing.T, expected storage.LayerUploadState, received storage.LayerUploadState) {
+	if expected.Name != received.Name {
+		t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name)
+	}
+	if expected.UUID != received.UUID {
+		t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID)
+	}
+	if expected.Offset != received.Offset {
+		t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset)
+	}
+}
diff --git a/docs/util.go b/docs/util.go
new file mode 100644
index 00000000..976ddf31
--- /dev/null
+++ b/docs/util.go
@@ -0,0 +1,27 @@
+package registry
+
+import (
+	"net/http"
+	"reflect"
+	"runtime"
+
+	"github.com/gorilla/handlers"
+)
+
+// functionName returns the name of the function fn.
+func functionName(fn interface{}) string {
+	return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+}
+
+// resolveHandlerName attempts to resolve a nice, pretty name for the passed
+// in handler.
+func resolveHandlerName(method string, handler http.Handler) string {
+	switch v := handler.(type) {
+	case handlers.MethodHandler:
+		return functionName(v[method])
+	case http.HandlerFunc:
+		return functionName(v)
+	default:
+		return functionName(handler.ServeHTTP)
+	}
+}

From b11d549fd071def4409c63c0531490372ddeb184 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Thu, 8 Jan 2015 16:55:40 -0800
Subject: [PATCH 002/501] Adds support for content redirects for layer
 downloads

Includes a delegate implementation which redirects to the URL generated by
the storagedriver, and a cloudfront implementation.
Satisfies proposal #49
---
 docs/app.go   | 12 ++++++++++++
 docs/layer.go |  6 ++++++
 2 files changed, 18 insertions(+)

diff --git a/docs/app.go b/docs/app.go
index fefeb084..b757b9ab 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -31,6 +31,8 @@ type App struct {
 
 	tokenProvider tokenProvider
 
+	layerHandler storage.LayerHandler
+
 	accessController auth.AccessController
 }
 
@@ -76,6 +78,16 @@ func NewApp(configuration configuration.Configuration) *App {
 		app.accessController = accessController
 	}
 
+	layerHandlerType := configuration.HTTP.LayerHandler.Type()
+
+	if layerHandlerType != "" {
+		lh, err := storage.GetLayerHandler(layerHandlerType, configuration.HTTP.LayerHandler.Parameters(), driver)
+		if err != nil {
+			panic(fmt.Sprintf("unable to configure layer handler (%s): %v", layerHandlerType, err))
+		}
+		app.layerHandler = lh
+	}
+
 	return app
 }
 
diff --git a/docs/layer.go b/docs/layer.go
index a7c46c31..5d43a1ad 100644
--- a/docs/layer.go
+++ b/docs/layer.go
@@ -58,5 +58,11 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
 	}
 	defer layer.Close()
 
+	handler, err := lh.layerHandler.Resolve(layer)
+	if handler != nil {
+		handler.ServeHTTP(w, r)
+		return
+	}
+
 	http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer)
 }

From 9d3436c18e9f520a511128246394e8835309959d Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Thu, 8 Jan 2015 17:29:22 -0800
Subject: [PATCH 003/501] Fixes tests, moves layerhandler in config file

---
 docs/app.go   |  4 ++--
 docs/layer.go | 10 ++++++----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/docs/app.go b/docs/app.go
index b757b9ab..72ac4f06 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -78,10 +78,10 @@ func NewApp(configuration configuration.Configuration) *App {
 		app.accessController = accessController
 	}
 
-	layerHandlerType := configuration.HTTP.LayerHandler.Type()
+	layerHandlerType := configuration.LayerHandler.Type()
 
 	if layerHandlerType != "" {
-		lh, err := storage.GetLayerHandler(layerHandlerType, configuration.HTTP.LayerHandler.Parameters(), driver)
+		lh, err := storage.GetLayerHandler(layerHandlerType, configuration.LayerHandler.Parameters(), driver)
 		if err != nil {
 			panic(fmt.Sprintf("unable to configure layer handler (%s): %v", layerHandlerType, err))
 		}
diff --git a/docs/layer.go b/docs/layer.go
index 5d43a1ad..836df3b7 100644
--- a/docs/layer.go
+++ b/docs/layer.go
@@ -58,10 +58,12 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
 	}
 	defer layer.Close()
 
-	handler, err := lh.layerHandler.Resolve(layer)
-	if handler != nil {
-		handler.ServeHTTP(w, r)
-		return
+	if lh.layerHandler != nil {
+		handler, _ := lh.layerHandler.Resolve(layer)
+		if handler != nil {
+			handler.ServeHTTP(w, r)
+			return
+		}
 	}
 
 	http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer)

From fdcfc56f7bd6853cbe375f85bd99bbabb9325245 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 8 Jan 2015 14:59:15 -0800
Subject: [PATCH 004/501] Refactor handling of hmac state packing

This refactors the hmac state token to take control of the layerUploadState
json message, which has been removed from the storage backend. It also moves
away from the concept of a LayerUploadStateStore callback object, which was
short-lived. This allows for upload offset to be managed by the web
application logic in the face of an inconsistent backend. By controlling the
upload offset externally, we reduce the possibility of misreporting upload
state to a client.
We may still want to modify the way this works after getting production
experience.

Signed-off-by: Stephen J Day
---
 docs/hmac.go                          | 72 +++++++++++++++++++++++++++
 docs/{tokens_test.go => hmac_test.go} | 34 ++++++-------
 docs/tokens.go                        | 65 ------------------------
 3 files changed, 87 insertions(+), 84 deletions(-)
 create mode 100644 docs/hmac.go
 rename docs/{tokens_test.go => hmac_test.go} (69%)
 delete mode 100644 docs/tokens.go

diff --git a/docs/hmac.go b/docs/hmac.go
new file mode 100644
index 00000000..d2470087
--- /dev/null
+++ b/docs/hmac.go
@@ -0,0 +1,72 @@
+package registry
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// layerUploadState captures the serializable state of the layer upload.
+type layerUploadState struct {
+	// Name is the primary repository under which the layer will be linked.
+	Name string
+
+	// UUID identifies the upload.
+	UUID string
+
+	// Offset contains the current progress of the upload.
+	Offset int64
+
+	// StartedAt is the original start time of the upload.
+	StartedAt time.Time
+}
+
+type hmacKey string
+
+// unpackUploadState unpacks and validates the layer upload state from the
+// token, using the hmacKey secret.
+func (secret hmacKey) unpackUploadState(token string) (layerUploadState, error) {
+	var state layerUploadState
+
+	tokenBytes, err := base64.URLEncoding.DecodeString(token)
+	if err != nil {
+		return state, err
+	}
+	mac := hmac.New(sha256.New, []byte(secret))
+
+	if len(tokenBytes) < mac.Size() {
+		return state, fmt.Errorf("Invalid token")
+	}
+
+	macBytes := tokenBytes[:mac.Size()]
+	messageBytes := tokenBytes[mac.Size():]
+
+	mac.Write(messageBytes)
+	if !hmac.Equal(mac.Sum(nil), macBytes) {
+		return state, fmt.Errorf("Invalid token")
+	}
+
+	if err := json.Unmarshal(messageBytes, &state); err != nil {
+		return state, err
+	}
+
+	return state, nil
+}
+
+// packUploadState packs the upload state, signing it with an hmac digest
+// using the hmacKey secret and encoding to url safe base64. The resulting
+// token can be used to share data with minimized risk of external tampering.
+func (secret hmacKey) packUploadState(lus layerUploadState) (string, error) {
+	mac := hmac.New(sha256.New, []byte(secret))
+	p, err := json.Marshal(lus)
+	if err != nil {
+		return "", err
+	}
+
+	mac.Write(p)
+
+	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
+}
diff --git a/docs/tokens_test.go b/docs/hmac_test.go
similarity index 69%
rename from docs/tokens_test.go
rename to docs/hmac_test.go
index a447438a..5ad60f61 100644
--- a/docs/tokens_test.go
+++ b/docs/hmac_test.go
@@ -1,12 +1,8 @@
 package registry
 
-import (
-	"testing"
+import "testing"
 
-	"github.com/docker/distribution/storage"
-)
-
-var layerUploadStates = []storage.LayerUploadState{
+var layerUploadStates = []layerUploadState{
 	{
 		Name: "hello",
 		UUID: "abcd-1234-qwer-0987",
@@ -47,15 +43,15 @@ var secrets = []string{
 // TestLayerUploadTokens constructs stateTokens from LayerUploadStates and
 // validates that the tokens can be used to reconstruct the proper upload state.
func TestLayerUploadTokens(t *testing.T) { - tokenProvider := newHMACTokenProvider("supersecret") + secret := hmacKey("supersecret") for _, testcase := range layerUploadStates { - token, err := tokenProvider.layerUploadStateToToken(testcase) + token, err := secret.packUploadState(testcase) if err != nil { t.Fatal(err) } - lus, err := tokenProvider.layerUploadStateFromToken(token) + lus, err := secret.unpackUploadState(token) if err != nil { t.Fatal(err) } @@ -68,39 +64,39 @@ func TestLayerUploadTokens(t *testing.T) { // only if they share the same secret. func TestHMACValidation(t *testing.T) { for _, secret := range secrets { - tokenProvider1 := newHMACTokenProvider(secret) - tokenProvider2 := newHMACTokenProvider(secret) - badTokenProvider := newHMACTokenProvider("DifferentSecret") + secret1 := hmacKey(secret) + secret2 := hmacKey(secret) + badSecret := hmacKey("DifferentSecret") for _, testcase := range layerUploadStates { - token, err := tokenProvider1.layerUploadStateToToken(testcase) + token, err := secret1.packUploadState(testcase) if err != nil { t.Fatal(err) } - lus, err := tokenProvider2.layerUploadStateFromToken(token) + lus, err := secret2.unpackUploadState(token) if err != nil { t.Fatal(err) } assertLayerUploadStateEquals(t, testcase, lus) - _, err = badTokenProvider.layerUploadStateFromToken(token) + _, err = badSecret.unpackUploadState(token) if err == nil { t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) } - badToken, err := badTokenProvider.layerUploadStateToToken(testcase) + badToken, err := badSecret.packUploadState(lus) if err != nil { t.Fatal(err) } - _, err = tokenProvider1.layerUploadStateFromToken(badToken) + _, err = secret1.unpackUploadState(badToken) if err == nil { t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) } - _, err = tokenProvider2.layerUploadStateFromToken(badToken) + _, err = secret2.unpackUploadState(badToken) if err == nil { t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) } @@ -108,7 +104,7 @@ func TestHMACValidation(t *testing.T) { } } -func assertLayerUploadStateEquals(t *testing.T, expected storage.LayerUploadState, received storage.LayerUploadState) { +func assertLayerUploadStateEquals(t *testing.T, expected layerUploadState, received layerUploadState) { if expected.Name != received.Name { t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) } diff --git a/docs/tokens.go b/docs/tokens.go deleted file mode 100644 index 276b896e..00000000 --- a/docs/tokens.go +++ /dev/null @@ -1,65 +0,0 @@ -package registry - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - - "github.com/docker/distribution/storage" -) - -// tokenProvider contains methods for serializing and deserializing state from token strings. -type tokenProvider interface { - // layerUploadStateFromToken retrieves the LayerUploadState for a given state token. - layerUploadStateFromToken(stateToken string) (storage.LayerUploadState, error) - - // layerUploadStateToToken returns a token string representing the given LayerUploadState. 
- layerUploadStateToToken(layerUploadState storage.LayerUploadState) (string, error) -} - -type hmacTokenProvider struct { - secret string -} - -func newHMACTokenProvider(secret string) tokenProvider { - return &hmacTokenProvider{secret: secret} -} - -// layerUploadStateFromToken deserializes the given HMAC stateToken and validates the prefix HMAC -func (ts *hmacTokenProvider) layerUploadStateFromToken(stateToken string) (storage.LayerUploadState, error) { - var lus storage.LayerUploadState - - tokenBytes, err := base64.URLEncoding.DecodeString(stateToken) - if err != nil { - return lus, err - } - mac := hmac.New(sha256.New, []byte(ts.secret)) - - if len(tokenBytes) < mac.Size() { - return lus, fmt.Errorf("Invalid token") - } - - macBytes := tokenBytes[:mac.Size()] - messageBytes := tokenBytes[mac.Size():] - - mac.Write(messageBytes) - if !hmac.Equal(mac.Sum(nil), macBytes) { - return lus, fmt.Errorf("Invalid token") - } - - if err := json.Unmarshal(messageBytes, &lus); err != nil { - return lus, err - } - - return lus, nil -} - -// layerUploadStateToToken serializes the given LayerUploadState to JSON with an HMAC prepended -func (ts *hmacTokenProvider) layerUploadStateToToken(lus storage.LayerUploadState) (string, error) { - mac := hmac.New(sha256.New, []byte(ts.secret)) - stateJSON := fmt.Sprintf("{\"Name\": \"%s\", \"UUID\": \"%s\", \"Offset\": %d}", lus.Name, lus.UUID, lus.Offset) - mac.Write([]byte(stateJSON)) - return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), stateJSON...)), nil -} From cd92071caa2556480a2c39e6dff690458b4ea21b Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 8 Jan 2015 15:04:00 -0800 Subject: [PATCH 005/501] Directly manage layerUploadState in webapp Most of this change follows from the modifications to the storage api. The driving factor is the separation of layerUploadState from the storage backend, leaving it to the web application to store and update it. As part of the updates to meet changes in the storage api, support for the size parameter has been completely removed. Signed-off-by: Stephen J Day --- docs/app.go | 4 -- docs/layerupload.go | 96 +++++++++++++++++++++++++++++++-------------- 2 files changed, 67 insertions(+), 33 deletions(-) diff --git a/docs/app.go b/docs/app.go index 72ac4f06..6a79cdfa 100644 --- a/docs/app.go +++ b/docs/app.go @@ -29,8 +29,6 @@ type App struct { // services contains the main services instance for the application. 
services *storage.Services - tokenProvider tokenProvider - layerHandler storage.LayerHandler accessController auth.AccessController @@ -66,8 +64,6 @@ func NewApp(configuration configuration.Configuration) *App { app.driver = driver app.services = storage.NewServices(app.driver) - app.tokenProvider = newHMACTokenProvider(configuration.HTTP.Secret) - authType := configuration.Auth.Type() if authType != "" { diff --git a/docs/layerupload.go b/docs/layerupload.go index b694a677..158bf7b4 100644 --- a/docs/layerupload.go +++ b/docs/layerupload.go @@ -5,7 +5,7 @@ import ( "io" "net/http" "net/url" - "strconv" + "os" "github.com/Sirupsen/logrus" "github.com/docker/distribution/api/v2" @@ -33,26 +33,57 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if luh.UUID != "" { luh.log = luh.log.WithField("uuid", luh.UUID) - state, err := ctx.tokenProvider.layerUploadStateFromToken(r.FormValue("_state")) + state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logrus.Infof("error resolving upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) + ctx.log.Infof("error resolving upload: %v", err) + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + }) + } + luh.State = state + + if state.UUID != luh.UUID { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.log.Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID) + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) }) } layers := ctx.services.Layers() - upload, err := layers.Resume(state) + upload, err := layers.Resume(luh.Name, luh.UUID) if err != nil && err != storage.ErrLayerUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logrus.Infof("error resolving upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) + ctx.log.Errorf("error resolving upload: %v", err) + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) }) } - luh.Upload = upload + + if state.Offset > 0 { + // Seek the layer upload to the correct spot if it's non-zero. + // These error conditions should be rare and demonstrate really + // problems. We basically cancel the upload and tell the client to + // start over. + if nn, err := upload.Seek(luh.State.Offset, os.SEEK_SET); err != nil { + ctx.log.Infof("error seeking layer upload: %v", err) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + upload.Cancel() + }) + } else if nn != luh.State.Offset { + ctx.log.Infof("seek to wrong offest: %d != %d", nn, luh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + upload.Cancel() + }) + } + } + handler = closeResources(handler, luh.Upload) } @@ -67,6 +98,8 @@ type layerUploadHandler struct { UUID string Upload storage.LayerUpload + + State layerUploadState } // StartLayerUpload begins the layer upload process and allocates a server- @@ -171,14 +204,30 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http. // chunk responses. 
This sets the correct headers but the response status is
// left to the caller.
 func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error {
-	values := make(url.Values)
-	stateToken, err := luh.Context.tokenProvider.layerUploadStateToToken(storage.LayerUploadState{Name: luh.Upload.Name(), UUID: luh.Upload.UUID(), Offset: luh.Upload.Offset()})
+
+	offset, err := luh.Upload.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		luh.log.Errorf("unable to get current offset of layer upload: %v", err)
+		return err
+	}
+
+	// TODO(stevvooe): Need a better way to manage the upload state automatically.
+	luh.State.Name = luh.Name
+	luh.State.UUID = luh.Upload.UUID()
+	luh.State.Offset = offset
+	luh.State.StartedAt = luh.Upload.StartedAt()
+
+	token, err := hmacKey(luh.Config.HTTP.Secret).packUploadState(luh.State)
 	if err != nil {
 		logrus.Infof("error building upload state token: %s", err)
 		return err
 	}
-	values.Set("_state", stateToken)
-	uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(luh.Upload.Name(), luh.Upload.UUID(), values)
+
+	uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(
+		luh.Upload.Name(), luh.Upload.UUID(),
+		url.Values{
+			"_state": []string{token},
+		})
 	if err != nil {
 		logrus.Infof("error building upload url: %s", err)
 		return err
@@ -186,7 +235,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt
 	w.Header().Set("Location", uploadURL)
 	w.Header().Set("Content-Length", "0")
-	w.Header().Set("Range", fmt.Sprintf("0-%d", luh.Upload.Offset()))
+	w.Header().Set("Range", fmt.Sprintf("0-%d", luh.State.Offset))
 
 	return nil
 }
@@ -198,7 +247,6 @@ var errNotReadyToComplete = fmt.Errorf("not ready to complete upload")
 func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error {
 	// If we get a digest and length, we can finish the upload.
 	dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
-	sizeStr := r.FormValue("size")
 
 	if dgstStr == "" {
 		return errNotReadyToComplete
@@ -209,23 +257,13 @@ func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *htt
 		return err
 	}
 
-	var size int64
-	if sizeStr != "" {
-		size, err = strconv.ParseInt(sizeStr, 10, 64)
-		if err != nil {
-			return err
-		}
-	} else {
-		size = -1
-	}
-
-	luh.completeUpload(w, r, size, dgst)
+	luh.completeUpload(w, r, dgst)
 	return nil
 }
 
 // completeUpload finishes out the upload with the correct response.
-func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) {
-	layer, err := luh.Upload.Finish(size, dgst)
+func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, dgst digest.Digest) {
+	layer, err := luh.Upload.Finish(dgst)
 	if err != nil {
 		luh.Errors.Push(v2.ErrorCodeUnknown, err)
 		w.WriteHeader(http.StatusInternalServerError)

From 594263a3f5a63c84f38ca8decfa82d732179268a Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 9 Jan 2015 16:09:45 -0800
Subject: [PATCH 006/501] Correctly handle missing layer upload

Because we guarded the error check, nil Upload on the handler was
getting through to unexpected branches. This directly handles the
missing upload, ensuring it's set as expected.
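Taken together with the previous two patches, the _state mechanism is a simple round trip through the hmacKey helpers. A sketch of that flow, assuming the hmacKey and layerUploadState types from hmac.go above plus the fmt and time packages (the secret and field values are illustrative):

// exampleStateRoundTrip shows the _state round trip in isolation.
func exampleStateRoundTrip(secret string) error {
	key := hmacKey(secret)

	// Pack the state the server wants the client to carry between requests.
	token, err := key.packUploadState(layerUploadState{
		Name:      "foo/bar",
		UUID:      "abcd-1234",
		Offset:    1024,
		StartedAt: time.Now(),
	})
	if err != nil {
		return err
	}

	// The token rides in the _state query parameter of the upload URL and
	// comes back on the next request, where it is verified and decoded.
	state, err := key.unpackUploadState(token)
	if err != nil {
		return err
	}

	fmt.Printf("resume %s upload %s at offset %d\n", state.Name, state.UUID, state.Offset)
	return nil
}

If the token fails verification, or the UUID inside it disagrees with the request, the dispatcher above rejects the request with BLOB_UPLOAD_INVALID rather than trusting the client.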
Signed-off-by: Stephen J Day --- docs/layerupload.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docs/layerupload.go b/docs/layerupload.go index 158bf7b4..d597afa6 100644 --- a/docs/layerupload.go +++ b/docs/layerupload.go @@ -53,11 +53,18 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { layers := ctx.services.Layers() upload, err := layers.Resume(luh.Name, luh.UUID) - if err != nil && err != storage.ErrLayerUploadUnknown { + if err != nil { + ctx.log.Errorf("error resolving upload: %v", err) + if err == storage.ErrLayerUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) + }) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.log.Errorf("error resolving upload: %v", err) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) }) } luh.Upload = upload @@ -68,6 +75,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { // problems. We basically cancel the upload and tell the client to // start over. if nn, err := upload.Seek(luh.State.Offset, os.SEEK_SET); err != nil { + defer upload.Close() ctx.log.Infof("error seeking layer upload: %v", err) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -75,6 +83,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { upload.Cancel() }) } else if nn != luh.State.Offset { + defer upload.Close() ctx.log.Infof("seek to wrong offest: %d != %d", nn, luh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -129,6 +138,7 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return } if err := luh.layerUploadResponse(w, r); err != nil { From 825da388a45c2cc6cb20c5291d39140716446caa Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 16 Jan 2015 18:32:27 -0800 Subject: [PATCH 007/501] Update the registry app to use the new storage interfaces Signed-off-by: Stephen J Day --- docs/app.go | 29 ++++++++++++++++++----------- docs/app_test.go | 13 +++++++++---- docs/context.go | 7 ++++--- docs/images.go | 12 ++++++------ docs/layer.go | 5 ++--- docs/layerupload.go | 18 +++++++++++++----- docs/tags.go | 8 ++++---- 7 files changed, 56 insertions(+), 36 deletions(-) diff --git a/docs/app.go b/docs/app.go index 6a79cdfa..078e3303 100644 --- a/docs/app.go +++ b/docs/app.go @@ -26,8 +26,8 @@ type App struct { // driver maintains the app global storage driver instance. driver storagedriver.StorageDriver - // services contains the main services instance for the application. - services *storage.Services + // registry is the primary registry backend for the app instance. 
+	registry storage.Registry
 
 	layerHandler storage.LayerHandler
 
 	accessController auth.AccessController
 }
@@ -63,7 +63,7 @@ func NewApp(configuration configuration.Configuration) *App {
 	}
 
 	app.driver = driver
-	app.services = storage.NewServices(app.driver)
+	app.registry = storage.NewRegistryWithDriver(app.driver)
 
 	authType := configuration.Auth.Type()
 
 	if authType != "" {
@@ -136,11 +136,11 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		context := app.context(r)
 
-		if err := app.authorized(w, r, context); err != nil {
+		if err := app.authorized(w, r, context, context.vars["name"]); err != nil {
 			return
 		}
 
-		context.log = log.WithField("name", context.Name)
+		context.log = log.WithField("name", context.Repository.Name())
 		handler := dispatch(context, r)
 
 		ssrw := &singleStatusResponseWriter{ResponseWriter: w}
@@ -165,7 +165,6 @@ func (app *App) context(r *http.Request) *Context {
 	vars := mux.Vars(r)
 	context := &Context{
 		App:        app,
-		Name:       vars["name"],
 		urlBuilder: v2.NewURLBuilderFromRequest(r),
 	}
 
@@ -175,19 +174,23 @@
-// authorized checks if the request can proceed with with request access-
-// level. If it cannot, the method will return an error.
-func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
+// authorized checks if the request can proceed with access to the requested
+// repository. If it succeeds, the repository will be available on the
+// context. An error will be returned if access is not available.
+func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context, repo string) error {
 	if app.accessController == nil {
+		// No access controller, so we simply provide access.
+		context.Repository = app.registry.Repository(repo)
+
 		return nil // access controller is not enabled.
 	}
 
 	var accessRecords []auth.Access
 
-	if context.Name != "" {
+	if repo != "" {
 		resource := auth.Resource{
 			Type: "repository",
-			Name: context.Name,
+			Name: repo,
 		}
 
 		switch r.Method {
@@ -256,6 +259,10 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 		return err
 	}
 
+	// At this point, the request should have access to the repository under
+	// the requested operation. Make it available on the context.
+	context.Repository = app.registry.Repository(repo)
+
 	return nil
 }
 
diff --git a/docs/app_test.go b/docs/app_test.go
index 4d9535f7..d49c7bbd 100644
--- a/docs/app_test.go
+++ b/docs/app_test.go
@@ -10,6 +10,8 @@ import (
 	"github.com/docker/distribution/api/v2"
 	_ "github.com/docker/distribution/auth/silly"
 	"github.com/docker/distribution/configuration"
+	"github.com/docker/distribution/storage"
+	"github.com/docker/distribution/storagedriver/inmemory"
 )
 
 // TestAppDispatcher builds an application with a test dispatcher and ensures
 // that requests are properly dispatched and the handlers are constructed.
 // This only tests the dispatch mechanism. The underlying dispatchers must be
 // tested individually.
func TestAppDispatcher(t *testing.T) { + driver := inmemory.New() app := &App{ - Config: configuration.Configuration{}, - router: v2.Router(), + Config: configuration.Configuration{}, + router: v2.Router(), + driver: driver, + registry: storage.NewRegistryWithDriver(driver), } server := httptest.NewServer(app) router := v2.Router() @@ -32,8 +37,8 @@ func TestAppDispatcher(t *testing.T) { varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { return func(ctx *Context, r *http.Request) http.Handler { // Always checks the same name context - if ctx.Name != ctx.vars["name"] { - t.Fatalf("unexpected name: %q != %q", ctx.Name, "foo/bar") + if ctx.Repository.Name() != ctx.vars["name"] { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") } // Check that we have all that is expected diff --git a/docs/context.go b/docs/context.go index 88193cda..8e8d0fed 100644 --- a/docs/context.go +++ b/docs/context.go @@ -3,6 +3,7 @@ package registry import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/storage" ) // Context should contain the request specific context for use in across @@ -12,9 +13,9 @@ type Context struct { // App points to the application structure that created this context. *App - // Name is the prefix for the current request. Corresponds to the - // namespace/repository associated with the image. - Name string + // Repository is the repository for the current request. All requests + // should be scoped to a single repository. This field may be nil. + Repository storage.Repository // Errors is a collection of errors encountered during the request to be // returned to the client API. If errors are added to the collection, the diff --git a/docs/images.go b/docs/images.go index a6b55859..3d6feeed 100644 --- a/docs/images.go +++ b/docs/images.go @@ -38,8 +38,8 @@ type imageManifestHandler struct { // GetImageManifest fetches the image manifest from the storage backend, if it exists. func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { - manifests := imh.services.Manifests() - manifest, err := manifests.Get(imh.Name, imh.Tag) + manifests := imh.Repository.Manifests() + manifest, err := manifests.Get(imh.Tag) if err != nil { imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) @@ -54,7 +54,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http // PutImageManifest validates and stores and image in the registry. func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { - manifests := imh.services.Manifests() + manifests := imh.Repository.Manifests() dec := json.NewDecoder(r.Body) var manifest manifest.SignedManifest @@ -64,7 +64,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - if err := manifests.Put(imh.Name, imh.Tag, &manifest); err != nil { + if err := manifests.Put(imh.Tag, &manifest); err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. switch err := err.(type) { @@ -96,8 +96,8 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http // DeleteImageManifest removes the image with the given tag from the registry. 
func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { - manifests := imh.services.Manifests() - if err := manifests.Delete(imh.Name, imh.Tag); err != nil { + manifests := imh.Repository.Manifests() + if err := manifests.Delete(imh.Tag); err != nil { switch err := err.(type) { case storage.ErrUnknownManifest: imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) diff --git a/docs/layer.go b/docs/layer.go index 836df3b7..bea1cc8b 100644 --- a/docs/layer.go +++ b/docs/layer.go @@ -42,9 +42,8 @@ type layerHandler struct { // GetLayer fetches the binary data from backend storage returns it in the // response. func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { - layers := lh.services.Layers() - - layer, err := layers.Fetch(lh.Name, lh.Digest) + layers := lh.Repository.Layers() + layer, err := layers.Fetch(lh.Digest) if err != nil { switch err := err.(type) { diff --git a/docs/layerupload.go b/docs/layerupload.go index d597afa6..5cd445a5 100644 --- a/docs/layerupload.go +++ b/docs/layerupload.go @@ -43,6 +43,14 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } luh.State = state + if state.Name != ctx.Repository.Name() { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.log.Infof("mismatched repository name in upload state: %q != %q", state.Name, luh.Repository.Name()) + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + }) + } + if state.UUID != luh.UUID { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx.log.Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID) @@ -51,8 +59,8 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { }) } - layers := ctx.services.Layers() - upload, err := layers.Resume(luh.Name, luh.UUID) + layers := ctx.Repository.Layers() + upload, err := layers.Resume(luh.UUID) if err != nil { ctx.log.Errorf("error resolving upload: %v", err) if err == storage.ErrLayerUploadUnknown { @@ -114,8 +122,8 @@ type layerUploadHandler struct { // StartLayerUpload begins the layer upload process and allocates a server- // side upload session. func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) { - layers := luh.services.Layers() - upload, err := layers.Upload(luh.Name) + layers := luh.Repository.Layers() + upload, err := layers.Upload() if err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? luh.Errors.Push(v2.ErrorCodeUnknown, err) @@ -222,7 +230,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt } // TODO(stevvooe): Need a better way to manage the upload state automatically. - luh.State.Name = luh.Name + luh.State.Name = luh.Repository.Name() luh.State.UUID = luh.Upload.UUID() luh.State.Offset = offset luh.State.StartedAt = luh.Upload.StartedAt() diff --git a/docs/tags.go b/docs/tags.go index 18f6add2..1f745c6a 100644 --- a/docs/tags.go +++ b/docs/tags.go @@ -33,14 +33,14 @@ type tagsAPIResponse struct { // GetTags returns a json list of tags for a specific image name. 
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
 	defer r.Body.Close()
-	manifests := th.services.Manifests()
+	manifests := th.Repository.Manifests()
 
-	tags, err := manifests.Tags(th.Name)
+	tags, err := manifests.Tags()
 	if err != nil {
 		switch err := err.(type) {
 		case storage.ErrUnknownRepository:
 			w.WriteHeader(404)
-			th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Name})
+			th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()})
 		default:
 			th.Errors.PushErr(err)
 		}
@@ -51,7 +51,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
 
 	enc := json.NewEncoder(w)
 	if err := enc.Encode(tagsAPIResponse{
-		Name: th.Name,
+		Name: th.Repository.Name(),
 		Tags: tags,
 	}); err != nil {
 		th.Errors.PushErr(err)

From acfcc955deeda2987733993fd1d04459bf98c662 Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Tue, 20 Jan 2015 12:05:12 -0800
Subject: [PATCH 008/501] Add Docker Distribution API Version header

Setting a header for all responses can help clients better determine
if the server speaks the legacy v1 API or the v2 API. It is important
that the header be set *BEFORE* routing the request.

Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn)
---
 docs/app.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/app.go b/docs/app.go
index 6a79cdfa..05112731 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -88,6 +88,8 @@ func NewApp(configuration configuration.Configuration) *App {
 }
 
 func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Set a header with the Docker Distribution API Version for all responses.
+	w.Header().Add("Docker-Distribution-API-Version", "registry/2.0")
 	app.router.ServeHTTP(w, r)
 }

From 33a1f4ef7d4552cb423f446d544af95ca0965259 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 29 Jan 2015 21:26:35 -0800
Subject: [PATCH 009/501] Address server errors received during layer upload

This changeset addresses intermittent internal server errors encountered
during pushes. The root cause has been isolated to layers that result in
identical, empty filesystems but may have some path declarations (imagine
"./"), resulting in different tarsums. The main error message reported during
these upload problems was a 500 error, which was not correct. Further
investigation showed the errors to be rooted in digest verification when
finishing uploads.

Inspection of the surrounding code also identified a few issues. PutLayerChunk
was slightly refactored into PutLayerUploadComplete. Helper methods were
avoided to make the handler less confusing. This simplification leveraged an
earlier change in the spec that moved non-complete chunk uploads to the PATCH
method. Simple logging was also added in the unknown error case that should
help to avoid mysterious 500 errors in the future.

At the same time, the glaring omission of a proper layer upload cancel method
was rectified. This has been added in this change so it is not missed in the
future.

In the future, we may want to refactor the handler code to be more
straightforward, hopefully letting us avoid these problems.

The test cases added to reproduce these errors and drive these changes include
the following:

1. Pushing a layer with an empty body results in an invalid blob upload.
2. Push a layer with a different tarsum (in this case, empty tar).
3. Deleting a layer upload works.
4. Getting status on a deleted layer upload returns 404.

Common functionality was grouped into shared functions to remove repetition.
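The flow those cases exercise is compact enough to sketch: completing an upload is a single PUT against the upload URL, with the digest passed as a query parameter. A hedged sketch, where uploadURL and dgst stand in for the Location header value and the layer's digest (the test helpers below do essentially this; assumes io, net/http, and net/url):

// completeUpload issues the final PUT of a layer upload: the remaining body,
// if any, plus the digest as a query parameter. Existing parameters on the
// upload URL (notably _state) are preserved.
func completeUpload(uploadURL, dgst string, body io.Reader) (*http.Response, error) {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return nil, err
	}
	q := u.Query()
	q.Set("digest", dgst)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), body)
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req)
}

A 201 Created means the layer was verified and linked; a 400 with DIGEST_INVALID in the error body means verification failed.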
The API tests will still require future love. Signed-off-by: Stephen J Day --- docs/api_test.go | 208 +++++++++++++++++++++++++------------------- docs/layerupload.go | 169 +++++++++++++++++------------------ 2 files changed, 198 insertions(+), 179 deletions(-) diff --git a/docs/api_test.go b/docs/api_test.go index b0f3bb2b..68254920 100644 --- a/docs/api_test.go +++ b/docs/api_test.go @@ -11,6 +11,7 @@ import ( "net/http/httputil" "net/url" "os" + "reflect" "testing" "github.com/docker/distribution/api/v2" @@ -120,29 +121,59 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) // ------------------------------------------ - // Upload a layer - layerUploadURL, err := builder.BuildBlobUploadURL(imageName) + // Start an upload and cancel + uploadURLBase := startPushLayer(t, builder, imageName) + + req, err := http.NewRequest("DELETE", uploadURLBase, nil) if err != nil { - t.Fatalf("error building upload url: %v", err) + t.Fatalf("unexpected error creating delete request: %v", err) } - resp, err = http.Post(layerUploadURL, "", nil) + resp, err = http.DefaultClient.Do(req) if err != nil { - t.Fatalf("error starting layer upload: %v", err) + t.Fatalf("unexpected error sending delete request: %v", err) } - checkResponse(t, "starting layer upload", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Content-Length": []string{"0"}, - }) + checkResponse(t, "deleting upload", resp, http.StatusNoContent) + // A status check should result in 404 + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) + + // ----------------------------------------- + // Do layer push with an empty body + uploadURLBase = startPushLayer(t, builder, imageName) + resp, err = doPushLayer(t, builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("unexpected error doing bad layer push: %v", err) + } + + checkResponse(t, "bad layer push", resp, http.StatusBadRequest) + checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeBlobUploadInvalid) + + // ----------------------------------------- + // Do layer push with an invalid body + + // This is a valid but empty tarfile! + badTar := bytes.Repeat([]byte("\x00"), 1024) + uploadURLBase = startPushLayer(t, builder, imageName) + resp, err = doPushLayer(t, builder, imageName, layerDigest, uploadURLBase, bytes.NewReader(badTar)) + if err != nil { + t.Fatalf("unexpected error doing bad layer push: %v", err) + } + + checkResponse(t, "bad layer push", resp, http.StatusBadRequest) + checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) + + // ------------------------------------------ + // Now, actually do successful upload. layerLength, _ := layerFile.Seek(0, os.SEEK_END) layerFile.Seek(0, os.SEEK_SET) - // TODO(sday): Cancel the layer upload here and restart. - - uploadURLBase := startPushLayer(t, builder, imageName) + uploadURLBase = startPushLayer(t, builder, imageName) pushLayer(t, builder, imageName, layerDigest, uploadURLBase, layerFile) // ------------------------ @@ -218,27 +249,7 @@ func TestManifestAPI(t *testing.T) { defer resp.Body.Close() checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) - - // TODO(stevvooe): Shoot. The error setup is not working out. 
The content- - // type headers are being set after writing the status code. - // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { - // t.Fatalf("unexpected content type: %v != 'application/json'", - // resp.Header.Get("Content-Type")) - // } - dec := json.NewDecoder(resp.Body) - - var respErrs v2.Errors - if err := dec.Decode(&respErrs); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if len(respErrs.Errors) == 0 { - t.Fatalf("expected errors in response") - } - - if respErrs.Errors[0].Code != v2.ErrorCodeManifestUnknown { - t.Fatalf("expected manifest unknown error: got %v", respErrs) - } + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) tagsURL, err := builder.BuildTagsURL(imageName) if err != nil { @@ -253,18 +264,7 @@ func TestManifestAPI(t *testing.T) { // Check that we get an unknown repository error when asking for tags checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&respErrs); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) - } - - if len(respErrs.Errors) == 0 { - t.Fatalf("expected errors in response") - } - - if respErrs.Errors[0].Code != v2.ErrorCodeNameUnknown { - t.Fatalf("expected respository unknown error: got %v", respErrs) - } + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) // -------------------------------- // Attempt to push unsigned manifest with missing layers @@ -284,41 +284,17 @@ func TestManifestAPI(t *testing.T) { resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) defer resp.Body.Close() checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, + v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid) - dec = json.NewDecoder(resp.Body) - if err := dec.Decode(&respErrs); err != nil { - t.Fatalf("unexpected error decoding error response: %v", err) + expectedCounts := map[v2.ErrorCode]int{ + v2.ErrorCodeManifestUnverified: 1, + v2.ErrorCodeBlobUnknown: 2, + v2.ErrorCodeDigestInvalid: 2, } - var unverified int - var missingLayers int - var invalidDigests int - - for _, err := range respErrs.Errors { - switch err.Code { - case v2.ErrorCodeManifestUnverified: - unverified++ - case v2.ErrorCodeBlobUnknown: - missingLayers++ - case v2.ErrorCodeDigestInvalid: - // TODO(stevvooe): This error isn't quite descriptive enough -- - // the layer with an invalid digest isn't identified. 
- invalidDigests++ - default: - t.Fatalf("unexpected error: %v", err) - } - } - - if unverified != 1 { - t.Fatalf("should have received one unverified manifest error: %v", respErrs) - } - - if missingLayers != 2 { - t.Fatalf("should have received two missing layer errors: %v", respErrs) - } - - if invalidDigests != 2 { - t.Fatalf("should have received two invalid digest errors: %v", respErrs) + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } // TODO(stevvooe): Add a test case where we take a mostly valid registry, @@ -363,7 +339,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) var fetchedManifest manifest.SignedManifest - dec = json.NewDecoder(resp.Body) + dec := json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } @@ -448,11 +424,9 @@ func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string { return resp.Header.Get("Location") } -// pushLayer pushes the layer content returning the url on success. -func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string { - rsLength, _ := rs.Seek(0, os.SEEK_END) - rs.Seek(0, os.SEEK_SET) - +// doPushLayer pushes the layer content returning the url on success returning +// the response. If you're only expecting a successful response, use pushLayer. +func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { u, err := url.Parse(uploadURLBase) if err != nil { t.Fatalf("unexpected error parsing pushLayer url: %v", err) @@ -462,23 +436,24 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, "_state": u.Query()["_state"], "digest": []string{dgst.String()}, - - // TODO(stevvooe): Layer upload can be completed with and without size - // argument. We'll need to add a test that checks the latter path. - "size": []string{fmt.Sprint(rsLength)}, }.Encode() uploadURL := u.String() // Just do a monolithic upload - req, err := http.NewRequest("PUT", uploadURL, rs) + req, err := http.NewRequest("PUT", uploadURL, body) if err != nil { t.Fatalf("unexpected error creating new request: %v", err) } - resp, err := http.DefaultClient.Do(req) + return http.DefaultClient.Do(req) +} + +// pushLayer pushes the layer content returning the url on success. +func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, body) if err != nil { - t.Fatalf("unexpected error doing put: %v", err) + t.Fatalf("unexpected error doing push layer request: %v", err) } defer resp.Body.Close() @@ -506,6 +481,57 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus } } +// checkBodyHasErrorCodes ensures the body is an error body and has the +// expected error codes, returning the error structure, the json slice and a +// count of the errors by code. 
+func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...v2.ErrorCode) (v2.Errors, []byte, map[v2.ErrorCode]int) { + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading body %s: %v", msg, err) + } + + var errs v2.Errors + if err := json.Unmarshal(p, &errs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(errs.Errors) == 0 { + t.Fatalf("expected errors in response") + } + + // TODO(stevvooe): Shoot. The error setup is not working out. The content- + // type headers are being set after writing the status code. + // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { + // t.Fatalf("unexpected content type: %v != 'application/json'", + // resp.Header.Get("Content-Type")) + // } + + expected := map[v2.ErrorCode]struct{}{} + counts := map[v2.ErrorCode]int{} + + // Initialize map with zeros for expected + for _, code := range errorCodes { + expected[code] = struct{}{} + counts[code] = 0 + } + + for _, err := range errs.Errors { + if _, ok := expected[err.Code]; !ok { + t.Fatalf("unexpected error code %v encountered: %s ", err.Code, string(p)) + } + counts[err.Code]++ + } + + // Ensure that counts of expected errors were all non-zero + for code := range expected { + if counts[code] == 0 { + t.Fatalf("expected error code %v not encounterd: %s", code, string(p)) + } + } + + return errs, p, counts +} + func maybeDumpResponse(t *testing.T, resp *http.Response) { if d, err := httputil.DumpResponse(resp, true); err != nil { t.Logf("error dumping response: %v", err) diff --git a/docs/layerupload.go b/docs/layerupload.go index 5cd445a5..e9585b0e 100644 --- a/docs/layerupload.go +++ b/docs/layerupload.go @@ -23,10 +23,12 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } handler := http.Handler(handlers.MethodHandler{ - "POST": http.HandlerFunc(luh.StartLayerUpload), - "GET": http.HandlerFunc(luh.GetUploadStatus), - "HEAD": http.HandlerFunc(luh.GetUploadStatus), - "PUT": http.HandlerFunc(luh.PutLayerChunk), + "POST": http.HandlerFunc(luh.StartLayerUpload), + "GET": http.HandlerFunc(luh.GetUploadStatus), + "HEAD": http.HandlerFunc(luh.GetUploadStatus), + // TODO(stevvooe): Must implement patch support. + // "PATCH": http.HandlerFunc(luh.PutLayerChunk), + "PUT": http.HandlerFunc(luh.PutLayerUploadComplete), "DELETE": http.HandlerFunc(luh.CancelLayerUpload), }) @@ -158,55 +160,80 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re w.WriteHeader(http.StatusNoContent) } -// PutLayerChunk receives a layer chunk during the layer upload process, -// possible completing the upload with a checksum and length. -func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) { +// PutLayerUploadComplete takes the final request of a layer upload. The final +// chunk may include all the layer data, the final chunk of layer data or no +// layer data. Any data provided is received and verified. If successful, the +// layer is linked into the blob store and 201 Created is returned with the +// canonical url of the layer. +func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - } - - var finished bool - - // TODO(stevvooe): This is woefully incomplete. Missing stuff: - // - // 1. Extract information from range header, if present. - // 2. 
Check offset of current layer. - // 3. Emit correct error responses. - - // Read in the chunk - io.Copy(luh.Upload, r.Body) - - if err := luh.maybeCompleteUpload(w, r); err != nil { - if err != errNotReadyToComplete { - switch err := err.(type) { - case storage.ErrLayerInvalidSize: - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeSizeInvalid, err) - return - case storage.ErrLayerInvalidDigest: - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - return - default: - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - } - } - - if err := luh.layerUploadResponse(w, r); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) return } - if finished { - w.WriteHeader(http.StatusCreated) - } else { - w.WriteHeader(http.StatusAccepted) + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") + return } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // no digest? return error, but allow retry. + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed") + return + } + + // TODO(stevvooe): Check the incoming range header here, per the + // specification. LayerUpload should be seeked (sought?) to that position. + + // Read in the final chunk, if any. + io.Copy(luh.Upload, r.Body) + + layer, err := luh.Upload.Finish(dgst) + if err != nil { + switch err := err.(type) { + case storage.ErrLayerUploadUnavailable: + w.WriteHeader(http.StatusBadRequest) + // TODO(stevvooe): Arguably, we may want to add an error code to + // cover this condition. It is not always a client error but it + // may be. For now, we effectively throw out the upload and have + // them start over. + luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err.Err) + case storage.ErrLayerInvalidDigest: + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + default: + luh.log.Errorf("unknown error completing upload: %#v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + } + + // Clean up the backend layer data if there was an error. + if err := luh.Upload.Cancel(); err != nil { + // If the cleanup fails, all we can do is observe and report. + luh.log.Errorf("error canceling upload after error: %v", err) + } + + return + } + + // Build our canonical layer url + layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest()) + if err != nil { + luh.Errors.Push(v2.ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set("Location", layerURL) + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusCreated) } // CancelLayerUpload cancels an in-progress upload of a layer. @@ -214,8 +241,16 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http. 
if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return } + if err := luh.Upload.Cancel(); err != nil { + luh.log.Errorf("error encountered canceling upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.PushErr(err) + } + + w.WriteHeader(http.StatusNoContent) } // layerUploadResponse provides a standard request for uploading layers and @@ -257,45 +292,3 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt return nil } - -var errNotReadyToComplete = fmt.Errorf("not ready to complete upload") - -// maybeCompleteUpload tries to complete the upload if the correct parameters -// are available. Returns errNotReadyToComplete if not ready to complete. -func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error { - // If we get a digest and length, we can finish the upload. - dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - - if dgstStr == "" { - return errNotReadyToComplete - } - - dgst, err := digest.ParseDigest(dgstStr) - if err != nil { - return err - } - - luh.completeUpload(w, r, dgst) - return nil -} - -// completeUpload finishes out the upload with the correct response. -func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, dgst digest.Digest) { - layer, err := luh.Upload.Finish(dgst) - if err != nil { - luh.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest()) - if err != nil { - luh.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - w.Header().Set("Location", layerURL) - w.Header().Set("Content-Length", "0") - w.WriteHeader(http.StatusCreated) -} From b6270d9c14caf4627e9ca42e15c0f573c428cee6 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 2 Feb 2015 13:01:49 -0800 Subject: [PATCH 010/501] Handle empty blob files more appropriately Several API tests were added to ensure correct acceptance of zero-size and empty tar files. This led to several changes in the storage backend around the guarantees of remote file reading, which backs the layer and layer upload type. In support of these changes, zero-length and empty checks have been added to the digest package. These provide a sanity check against upstream tarsum changes. The fileReader has been modified to be more robust when reading and seeking on zero-length or non-existent files. The file no longer needs to exist for the reader to be created. Seeks can now move beyond the end of the file, causing reads to issue an io.EOF. This eliminates errors during certain race conditions for reading files which should be detected by stat calls. As a part of this, a few error types were factored out and the read buffer size was increased to something more reasonable. 
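Both degenerate layers digest cleanly, which is what lets the tests push them end to end. A short sketch using the digest package call visible in the diff below (error handling trimmed; assumes the bytes and digest packages):

// degenerateDigests computes the two degenerate layers the new tests cover.
func degenerateDigests() (digest.Digest, digest.Digest, error) {
	// A zero-length blob still has a well-defined tarsum digest.
	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
	if err != nil {
		return "", "", err
	}
	// So does a valid but empty tar archive: 1024 zero bytes.
	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
	if err != nil {
		return "", "", err
	}
	return zeroDigest, emptyDigest, nil
}

The two inputs differ, so their digests differ: pushing each against its own digest must succeed, while pushing a body against some other layer's digest must fail verification.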
Signed-off-by: Stephen J Day --- docs/api_test.go | 35 ++++++++++++++++++++++------------- docs/layerupload.go | 7 ------- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/docs/api_test.go b/docs/api_test.go index 68254920..5f9e6c38 100644 --- a/docs/api_test.go +++ b/docs/api_test.go @@ -144,7 +144,7 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) // ----------------------------------------- - // Do layer push with an empty body + // Do layer push with an empty body and different digest uploadURLBase = startPushLayer(t, builder, imageName) resp, err = doPushLayer(t, builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) if err != nil { @@ -152,21 +152,30 @@ func TestLayerAPI(t *testing.T) { } checkResponse(t, "bad layer push", resp, http.StatusBadRequest) - checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeBlobUploadInvalid) + checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) // ----------------------------------------- - // Do layer push with an invalid body - - // This is a valid but empty tarfile! - badTar := bytes.Repeat([]byte("\x00"), 1024) - uploadURLBase = startPushLayer(t, builder, imageName) - resp, err = doPushLayer(t, builder, imageName, layerDigest, uploadURLBase, bytes.NewReader(badTar)) + // Do layer push with an empty body and correct digest + zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{})) if err != nil { - t.Fatalf("unexpected error doing bad layer push: %v", err) + t.Fatalf("unexpected error digesting empty buffer: %v", err) } - checkResponse(t, "bad layer push", resp, http.StatusBadRequest) - checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) + uploadURLBase = startPushLayer(t, builder, imageName) + pushLayer(t, builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) + + // ----------------------------------------- + // Do layer push with an empty body and correct digest + + // This is a valid but empty tarfile! + emptyTar := bytes.Repeat([]byte("\x00"), 1024) + emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar)) + if err != nil { + t.Fatalf("unexpected error digesting empty tar: %v", err) + } + + uploadURLBase = startPushLayer(t, builder, imageName) + pushLayer(t, builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) // ------------------------------------------ // Now, actually do successful upload. 
@@ -517,7 +526,7 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error for _, err := range errs.Errors { if _, ok := expected[err.Code]; !ok { - t.Fatalf("unexpected error code %v encountered: %s ", err.Code, string(p)) + t.Fatalf("unexpected error code %v encountered during %s: %s ", err.Code, msg, string(p)) } counts[err.Code]++ } @@ -525,7 +534,7 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error // Ensure that counts of expected errors were all non-zero for code := range expected { if counts[code] == 0 { - t.Fatalf("expected error code %v not encounterd: %s", code, string(p)) + t.Fatalf("expected error code %v not encounterd during %s: %s", code, msg, string(p)) } } diff --git a/docs/layerupload.go b/docs/layerupload.go index e9585b0e..cfce98f3 100644 --- a/docs/layerupload.go +++ b/docs/layerupload.go @@ -198,13 +198,6 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * layer, err := luh.Upload.Finish(dgst) if err != nil { switch err := err.(type) { - case storage.ErrLayerUploadUnavailable: - w.WriteHeader(http.StatusBadRequest) - // TODO(stevvooe): Arguably, we may want to add an error code to - // cover this condition. It is not always a client error but it - // may be. For now, we effectively throw out the upload and have - // them start over. - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err.Err) case storage.ErrLayerInvalidDigest: w.WriteHeader(http.StatusBadRequest) luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) From 2aed7c2d0ce0f0451ac7bba61705d9da75d72c74 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 28 Jan 2015 15:55:18 -0800 Subject: [PATCH 011/501] Webhook notification support in registry webapp Endpoints are now created at applications startup time, using notification configuration. The instances are then added to a Broadcaster instance, which becomes the main event sink for the application. At request time, an event bridge is configured to listen to repository method calls. The actor and source of the eventBridge are created from the requeest context and application, respectively. The result is notifications are dispatched with calls to the context's Repository instance and are queued to each endpoint via the broadcaster. This commit also adds the concept of a RequestID and App.InstanceID. The request id uniquely identifies each request and the InstanceID uniquely identifies a run of the registry. These identifiers can be used in the future to correlate log messages with generated events to support rich debugging. The fields of the app were slightly reorganized for clarity and a few horrid util functions have been removed. 
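The fan-out at the center of this is worth seeing in isolation: a broadcaster is itself a sink that writes to many sinks. A generic sketch of the pattern, not the notifications package's actual types:

// Sink consumes events; endpoints and broadcasters both satisfy it.
type Sink interface {
	Write(event interface{}) error
}

// Broadcaster fans each event out to every configured sink.
type Broadcaster struct {
	sinks []Sink
}

func (b *Broadcaster) Write(event interface{}) error {
	for _, sink := range b.sinks {
		if err := sink.Write(event); err != nil {
			return err
		}
	}
	return nil
}

That is the property the NOTE in the diff below leans on: moving to a queue-backed implementation later only means handing the application a different Sink.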
Signed-off-by: Stephen J Day --- docs/app.go | 121 +++++++++++++++++++++++++++++++++++++++--------- docs/context.go | 3 ++ docs/util.go | 27 ----------- 3 files changed, 102 insertions(+), 49 deletions(-) delete mode 100644 docs/util.go diff --git a/docs/app.go b/docs/app.go index b5cb6776..e7c96b74 100644 --- a/docs/app.go +++ b/docs/app.go @@ -2,16 +2,19 @@ package registry import ( "fmt" + "net" "net/http" + "os" + "code.google.com/p/go-uuid/uuid" + log "github.com/Sirupsen/logrus" "github.com/docker/distribution/api/v2" "github.com/docker/distribution/auth" "github.com/docker/distribution/configuration" "github.com/docker/distribution/storage" + "github.com/docker/distribution/storage/notifications" "github.com/docker/distribution/storagedriver" "github.com/docker/distribution/storagedriver/factory" - - log "github.com/Sirupsen/logrus" "github.com/gorilla/mux" ) @@ -21,17 +24,22 @@ import ( type App struct { Config configuration.Configuration - router *mux.Router + // InstanceID is a unique id assigned to the application on each creation. + // Provides information in the logs and context to identify restarts. + InstanceID string - // driver maintains the app global storage driver instance. - driver storagedriver.StorageDriver + router *mux.Router // main application router, configured with dispatchers + driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. + registry storage.Registry // registry is the primary registry backend for the app instance. + accessController auth.AccessController // main access controller for application - // registry is the primary registry backend for the app instance. - registry storage.Registry + // events contains notification related configuration. + events struct { + sink notifications.Sink + source notifications.SourceRecord + } - layerHandler storage.LayerHandler - - accessController auth.AccessController + layerHandler storage.LayerHandler // allows dispatch of layer serving to external provider } // NewApp takes a configuration and returns a configured app, ready to serve @@ -39,8 +47,9 @@ type App struct { // handlers accordingly. func NewApp(configuration configuration.Configuration) *App { app := &App{ - Config: configuration, - router: v2.Router(), + Config: configuration, + InstanceID: uuid.New(), + router: v2.Router(), } // Register the handler dispatchers. 
@@ -53,7 +62,8 @@ func NewApp(configuration configuration.Configuration) *App {
 	app.register(v2.RouteNameBlobUpload, layerUploadDispatcher)
 	app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher)
 
-	driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters())
+	var err error
+	app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters())
 
 	if err != nil {
 		// TODO(stevvooe): Move the creation of a service into a protected
@@ -62,7 +72,7 @@ func NewApp(configuration configuration.Configuration) *App {
 		panic(err)
 	}
 
-	app.driver = driver
+	app.configureEvents(&configuration)
 	app.registry = storage.NewRegistryWithDriver(app.driver)
 
 	authType := configuration.Auth.Type()
@@ -77,7 +87,7 @@ func NewApp(configuration configuration.Configuration) *App {
 
 	layerHandlerType := configuration.LayerHandler.Type()
 	if layerHandlerType != "" {
-		lh, err := storage.GetLayerHandler(layerHandlerType, configuration.LayerHandler.Parameters(), driver)
+		lh, err := storage.GetLayerHandler(layerHandlerType, configuration.LayerHandler.Parameters(), app.driver)
 		if err != nil {
 			panic(fmt.Sprintf("unable to configure layer handler (%s): %v", layerHandlerType, err))
 		}
@@ -87,12 +97,6 @@ func NewApp(configuration configuration.Configuration) *App {
 	return app
 }
 
-func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	// Set a header with the Docker Distribution API Version for all responses.
-	w.Header().Add("Docker-Distribution-API-Version", "registry/2.0")
-	app.router.ServeHTTP(w, r)
-}
-
 // register a handler with the application, by route name. The handler will be
 // passed through the application filters and context will be constructed at
 // request time.
@@ -107,6 +111,59 @@ func (app *App) register(routeName string, dispatch dispatchFunc) {
 	app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch))
 }
 
+// configureEvents prepares the event sink for action.
+func (app *App) configureEvents(configuration *configuration.Configuration) {
+	// Configure all of the endpoint sinks.
+	var sinks []notifications.Sink
+	for _, endpoint := range configuration.Notifications.Endpoints {
+		if endpoint.Disabled {
+			log.Infof("endpoint %s disabled, skipping", endpoint.Name)
+			continue
+		}
+
+		log.Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers)
+		endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{
+			Timeout:   endpoint.Timeout,
+			Threshold: endpoint.Threshold,
+			Backoff:   endpoint.Backoff,
+			Headers:   endpoint.Headers,
+		})
+
+		sinks = append(sinks, endpoint)
+	}
+
+	// NOTE(stevvooe): Moving to a new queueing implementation is as easy as
+	// replacing broadcaster with a rabbitmq implementation. It's recommended
+	// that the registry instances also act as the workers to keep deployment
+	// simple.
+	app.events.sink = notifications.NewBroadcaster(sinks...)
+
+	// Populate registry event source
+	hostname, err := os.Hostname()
+	if err != nil {
+		hostname = configuration.HTTP.Addr
+	} else {
+		// try to pick the port off the config
+		_, port, err := net.SplitHostPort(configuration.HTTP.Addr)
+		if err == nil {
+			hostname = net.JoinHostPort(hostname, port)
+		}
+	}
+
+	app.events.source = notifications.SourceRecord{
+		Addr:       hostname,
+		InstanceID: app.InstanceID,
+	}
+}
+
+func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	defer r.Body.Close() // ensure that request body is always closed.
+
+	// Set a header with the Docker Distribution API Version for all responses.
+	w.Header().Add("Docker-Distribution-API-Version", "registry/2.0")
+	app.router.ServeHTTP(w, r)
+}
+
 // dispatchFunc takes a context and request and returns a constructed handler
 // for the route. The dispatcher will use this to dynamically create request
 // specific handlers for each endpoint without creating a new router for each
@@ -142,11 +199,14 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 			return
 		}
 
+		// decorate the authorized repository with an event bridge.
+		context.Repository = notifications.Listen(
+			context.Repository, app.eventBridge(context, r))
+
 		context.log = log.WithField("name", context.Repository.Name())
 		handler := dispatch(context, r)
 
 		ssrw := &singleStatusResponseWriter{ResponseWriter: w}
-		context.log.Infoln("handler", resolveHandlerName(r.Method, handler))
 		handler.ServeHTTP(ssrw, r)
 
 		// Automated error response handling here. Handlers may return their
@@ -167,6 +227,7 @@ func (app *App) context(r *http.Request) *Context {
 	vars := mux.Vars(r)
 	context := &Context{
 		App:        app,
+		RequestID:  uuid.New(),
 		urlBuilder: v2.NewURLBuilderFromRequest(r),
 	}
 
@@ -268,6 +329,22 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 	return nil
 }
 
+// eventBridge returns a bridge for the current request, configured with the
+// correct actor and source.
+func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener {
+	// TODO(stevvooe): Need to extract user data from request context using
+	// auth system. Would prefer to do this during logging refactor and
+	// addition of user and google context type.
+	actor := notifications.ActorRecord{
+		Name:      "--todo--",
+		Addr:      r.RemoteAddr,
+		Host:      r.Host,
+		RequestID: ctx.RequestID,
+	}
+
+	return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, app.events.sink)
+}
+
 // apiBase implements a simple yes-man for doing overall checks against the
 // api. This can support auth roundtrips to support docker login.
 func apiBase(w http.ResponseWriter, r *http.Request) {
diff --git a/docs/context.go b/docs/context.go
index 8e8d0fed..eaa603a8 100644
--- a/docs/context.go
+++ b/docs/context.go
@@ -13,6 +13,9 @@ type Context struct {
 	// App points to the application structure that created this context.
 	*App
 
+	// RequestID is the unique id of the request.
+	RequestID string
+
 	// Repository is the repository for the current request. All requests
 	// should be scoped to a single repository. This field may be nil.
 	Repository storage.Repository
diff --git a/docs/util.go b/docs/util.go
deleted file mode 100644
index 976ddf31..00000000
--- a/docs/util.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package registry
-
-import (
-	"net/http"
-	"reflect"
-	"runtime"
-
-	"github.com/gorilla/handlers"
-)
-
-// functionName returns the name of the function fn.
-func functionName(fn interface{}) string {
-	return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
-}
-
-// resolveHandlerName attempts to resolve a nice, pretty name for the passed
-// in handler.
-func resolveHandlerName(method string, handler http.Handler) string {
-	switch v := handler.(type) {
-	case handlers.MethodHandler:
-		return functionName(v[method])
-	case http.HandlerFunc:
-		return functionName(v)
-	default:
-		return functionName(handler.ServeHTTP)
-	}
-}

From 1089cae282196e8fe0cbc5733882787cf4c1b7a3 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 3 Feb 2015 13:28:10 -0800
Subject: [PATCH 012/501] Separate request data from actor in Event

To clarify the role of actor, the request data that initiates an event
has been separated. The ActorRecord is pared down to just the username.
This eliminates confusion about where event-related data should be
added.

Signed-off-by: Stephen J Day
---
 docs/app.go | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/docs/app.go b/docs/app.go
index e7c96b74..53759a1e 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -336,13 +336,11 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene
 	// auth system. Would prefer to do this during logging refactor and
 	// addition of user and google context type.
 	actor := notifications.ActorRecord{
-		Name:      "--todo--",
-		Addr:      r.RemoteAddr,
-		Host:      r.Host,
-		RequestID: ctx.RequestID,
+		Name: "--todo--",
 	}
+	request := notifications.NewRequestRecord(ctx.RequestID, r)
 
-	return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, app.events.sink)
+	return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink)
 }
 
 // apiBase implements a simple yes-man for doing overall checks against the

From 1f06e4f816404b0c702d8b6296ca91e98f738304 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 3 Feb 2015 18:27:40 -0800
Subject: [PATCH 013/501] Manifest PUT should return 202 Accepted status

Signed-off-by: Stephen J Day
---
 docs/api_test.go | 3 +--
 docs/images.go   | 2 ++
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/api_test.go b/docs/api_test.go
index 5f9e6c38..aa70e504 100644
--- a/docs/api_test.go
+++ b/docs/api_test.go
@@ -336,8 +336,7 @@ func TestManifestAPI(t *testing.T) {
 	}
 
 	resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest)
-
-	checkResponse(t, "putting signed manifest", resp, http.StatusOK)
+	checkResponse(t, "putting signed manifest", resp, http.StatusAccepted)
 
 	resp, err = http.Get(manifestURL)
 	if err != nil {
diff --git a/docs/images.go b/docs/images.go
index 3d6feeed..db6bd705 100644
--- a/docs/images.go
+++ b/docs/images.go
@@ -92,6 +92,8 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
+
+	w.WriteHeader(http.StatusAccepted)
 }
 
 // DeleteImageManifest removes the image with the given tag from the registry.

From 904b35a24f80c08b42ca1f6e737fd3903ec744a5 Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Tue, 3 Feb 2015 17:59:24 -0800
Subject: [PATCH 014/501] Use context for auth access controllers

The auth package has been updated to use "golang.org/x/net/context" for
passing information between the application and the auth backend.

AccessControllers should now set an "auth.user" context value to an
AuthUser struct containing a single "Name" field for now, with possible
optional values in the future.

The "silly" auth backend always sets the name to "silly", while the
"token" auth backend will set the name to match the "subject" claim of
the JWT.

Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn)
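As a rough illustration of the contract this commit describes, the sketch below shows how an authorized context might carry the "auth.user" value and how a caller recovers it. The UserInfo type is defined locally here as an assumption for the example; it is not the auth package's actual declaration.

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    // UserInfo mirrors the shape described above: a single Name field for now.
    type UserInfo struct {
        Name string
    }

    func main() {
        // An access controller would return a context like this after a
        // successful authorization check.
        ctx := context.WithValue(context.Background(), "auth.user", UserInfo{Name: "silly"})

        // Callers recover the user with a checked type assertion, falling
        // back to the zero value when the key is absent.
        user, _ := ctx.Value("auth.user").(UserInfo)
        fmt.Println("authorized user:", user.Name) // authorized user: silly
    }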
---
 docs/app.go     | 19 ++++++++++++++-----
 docs/context.go |  4 ++++
 2 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/docs/app.go b/docs/app.go
index 53759a1e..6e5480c3 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -16,6 +16,7 @@ import (
 	"github.com/docker/distribution/storagedriver"
 	"github.com/docker/distribution/storagedriver/factory"
 	"github.com/gorilla/mux"
+	"golang.org/x/net/context"
 )
 
 // App is a global registry application object. Shared resources can be placed
@@ -189,6 +190,12 @@ func (ssrw *singleStatusResponseWriter) WriteHeader(status int) {
 	ssrw.ResponseWriter.WriteHeader(status)
 }
 
+// WithRequest adds an http request to the given context and returns
+// a new context with an "http.request" value.
+func WithRequest(ctx context.Context, r *http.Request) context.Context {
+	return context.WithValue(ctx, "http.request", r)
+}
+
 // dispatcher returns a handler that constructs a request specific context and
 // handler, using the dispatch factory function.
 func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
@@ -301,7 +308,8 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 		}
 	}
 
-	if err := app.accessController.Authorized(r, accessRecords...); err != nil {
+	authCtx, err := app.accessController.Authorized(WithRequest(nil, r), accessRecords...)
+	if err != nil {
 		switch err := err.(type) {
 		case auth.Challenge:
 			w.Header().Set("Content-Type", "application/json; charset=utf-8")
@@ -322,6 +330,10 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 		return err
 	}
 
+	// The authorized context should contain an auth.UserInfo
+	// object. If it doesn't, just use the zero value for now.
+	context.AuthUserInfo, _ = authCtx.Value("auth.user").(auth.UserInfo)
+
 	// At this point, the request should have access to the repository under
 	// the requested operation. Make is available on the context.
 	context.Repository = app.registry.Repository(repo)
@@ -332,11 +344,8 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 
 // eventBridge returns a bridge for the current request, configured with the
 // correct actor and source.
 func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener {
-	// TODO(stevvooe): Need to extract user data from request context using
-	// auth system. Would prefer to do this during logging refactor and
-	// addition of user and google context type.
 	actor := notifications.ActorRecord{
-		Name: "--todo--",
+		Name: ctx.AuthUserInfo.Name,
 	}
 	request := notifications.NewRequestRecord(ctx.RequestID, r)
 
diff --git a/docs/context.go b/docs/context.go
index eaa603a8..150e5de6 100644
--- a/docs/context.go
+++ b/docs/context.go
@@ -3,6 +3,7 @@ package registry
 import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/api/v2"
+	"github.com/docker/distribution/auth"
 	"github.com/docker/distribution/storage"
 )
 
@@ -25,6 +26,9 @@ type Context struct {
 	// handler *must not* start the response via http.ResponseWriter.
 	Errors v2.Errors
 
+	// AuthUserInfo contains information about an authorized client.
+	AuthUserInfo auth.UserInfo
+
 	// vars contains the extracted gorilla/mux variables that can be used for
 	// assignment.
 	vars map[string]string

From 3e84069959b650440ca8f4bdaf4f1f3b4b7a5bac Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 6 Feb 2015 16:19:19 -0800
Subject: [PATCH 015/501] Integrate contextual logging with registry app

This changeset integrates contextual logging into the registry web
application. Idiomatic context use is attempted within the current
webapp layout. The functionality is centered around making lifecycle
objects (application and request context) into contexts themselves.
Relevant data has been moved into the context where appropriate. We
still have some work to do to factor out the registry.Context object
and the dispatching functionality to remove some awkward portions.

The api tests were slightly refactored to use a test environment to
eliminate common code.

Signed-off-by: Stephen J Day
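The core idea, lifecycle objects that are themselves contexts carrying a logger, can be sketched in a few lines. This is a simplified assumption of how such helpers might look, using the standard log package rather than the ctxu helpers the diff below actually introduces.

    package main

    import (
        "log"
        "os"

        "golang.org/x/net/context"
    )

    type loggerKey struct{}

    // WithLogger stores a logger in the context for downstream handlers.
    func WithLogger(ctx context.Context, logger *log.Logger) context.Context {
        return context.WithValue(ctx, loggerKey{}, logger)
    }

    // GetLogger fetches the contextual logger, falling back to a default so
    // callers never receive nil.
    func GetLogger(ctx context.Context) *log.Logger {
        if logger, ok := ctx.Value(loggerKey{}).(*log.Logger); ok {
            return logger
        }
        return log.New(os.Stderr, "", log.LstdFlags)
    }

    func main() {
        // The application context is built once; request contexts derive
        // from it and can add request-scoped fields.
        appCtx := WithLogger(context.Background(), log.New(os.Stderr, "app ", log.LstdFlags))
        GetLogger(appCtx).Println("instance started")
    }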
---
 docs/api_test.go          | 120 +++++++++++++++++++------------------
 docs/app.go               |  77 +++++++++++++++---------
 docs/app_test.go          |  13 +++--
 docs/basicauth.go         |  11 ++++
 docs/basicauth_prego14.go |  41 +++++++++++++
 docs/context.go           |  80 ++++++++++++++++++++-----
 docs/images.go            |   8 ++-
 docs/layer.go             |  15 +++--
 docs/layerupload.go       |  30 +++++-----
 9 files changed, 266 insertions(+), 129 deletions(-)
 create mode 100644 docs/basicauth.go
 create mode 100644 docs/basicauth_prego14.go

diff --git a/docs/api_test.go b/docs/api_test.go
index aa70e504..5e3bd72c 100644
--- a/docs/api_test.go
+++ b/docs/api_test.go
@@ -22,26 +22,15 @@ import (
 	"github.com/docker/distribution/testutil"
 	"github.com/docker/libtrust"
 	"github.com/gorilla/handlers"
+	"golang.org/x/net/context"
 )
 
 // TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified
 // 200 OK response.
 func TestCheckAPI(t *testing.T) {
-	config := configuration.Configuration{
-		Storage: configuration.Storage{
-			"inmemory": configuration.Parameters{},
-		},
-	}
+	env := newTestEnv(t)
 
-	app := NewApp(config)
-	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
-	builder, err := v2.NewURLBuilderFromString(server.URL)
-
-	if err != nil {
-		t.Fatalf("error creating url builder: %v", err)
-	}
-
-	baseURL, err := builder.BuildBaseURL()
+	baseURL, err := env.builder.BuildBaseURL()
 	if err != nil {
 		t.Fatalf("unexpected error building base url: %v", err)
 	}
@@ -73,20 +62,7 @@ func TestLayerAPI(t *testing.T) {
 	// TODO(stevvooe): This test code is complete junk but it should cover the
 	// complete flow. This must be broken down and checked against the
 	// specification *before* we submit the final to docker core.
-
-	config := configuration.Configuration{
-		Storage: configuration.Storage{
-			"inmemory": configuration.Parameters{},
-		},
-	}
-
-	app := NewApp(config)
-	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
-	builder, err := v2.NewURLBuilderFromString(server.URL)
-
-	if err != nil {
-		t.Fatalf("error creating url builder: %v", err)
-	}
+	env := newTestEnv(t)
 
 	imageName := "foo/bar"
 	// "build" our layer file
@@ -99,7 +75,7 @@ func TestLayerAPI(t *testing.T) {
 
 	// -----------------------------------
 	// Test fetch for non-existent content
-	layerURL, err := builder.BuildBlobURL(imageName, layerDigest)
+	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
 	if err != nil {
 		t.Fatalf("error building url: %v", err)
 	}
@@ -122,7 +98,7 @@ func TestLayerAPI(t *testing.T) {
 
 	// ------------------------------------------
 	// Start an upload and cancel
-	uploadURLBase := startPushLayer(t, builder, imageName)
+	uploadURLBase := startPushLayer(t, env.builder, imageName)
 
 	req, err := http.NewRequest("DELETE", uploadURLBase, nil)
 	if err != nil {
@@ -145,8 +121,8 @@ func TestLayerAPI(t *testing.T) {
 
 	// -----------------------------------------
 	// Do layer push with an empty body and different digest
-	uploadURLBase = startPushLayer(t, builder, imageName)
-	resp, err = doPushLayer(t, builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{}))
+	uploadURLBase = startPushLayer(t, env.builder, imageName)
+	resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{}))
 	if err != nil {
 		t.Fatalf("unexpected error doing bad layer push: %v", err)
 	}
@@ -161,8 +137,8 @@ func TestLayerAPI(t *testing.T) {
 		t.Fatalf("unexpected error digesting empty buffer: %v", err)
 	}
 
-	uploadURLBase = startPushLayer(t, builder, imageName)
-	pushLayer(t, builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{}))
+	uploadURLBase = startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{}))
 
 	// -----------------------------------------
 	// Do layer push with an empty body and correct digest
@@ -174,16 +150,16 @@ func TestLayerAPI(t *testing.T) {
 		t.Fatalf("unexpected error digesting empty tar: %v", err)
 	}
 
-	uploadURLBase = startPushLayer(t, builder, imageName)
-	pushLayer(t, builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar))
+	uploadURLBase = startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar))
 
 	// ------------------------------------------
 	// Now, actually do successful upload.
 	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
 	layerFile.Seek(0, os.SEEK_SET)
 
-	uploadURLBase = startPushLayer(t, builder, imageName)
-	pushLayer(t, builder, imageName, layerDigest, uploadURLBase, layerFile)
+	uploadURLBase = startPushLayer(t, env.builder, imageName)
+	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
 
 	// ------------------------
 	// Use a head request to see if the layer exists.
@@ -223,28 +199,12 @@ func TestLayerAPI(t *testing.T) {
 }
 
 func TestManifestAPI(t *testing.T) {
-	pk, err := libtrust.GenerateECP256PrivateKey()
-	if err != nil {
-		t.Fatalf("unexpected error generating private key: %v", err)
-	}
-
-	config := configuration.Configuration{
-		Storage: configuration.Storage{
-			"inmemory": configuration.Parameters{},
-		},
-	}
-
-	app := NewApp(config)
-	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
-	builder, err := v2.NewURLBuilderFromString(server.URL)
-	if err != nil {
-		t.Fatalf("unexpected error creating url builder: %v", err)
-	}
+	env := newTestEnv(t)
 
 	imageName := "foo/bar"
 	tag := "thetag"
 
-	manifestURL, err := builder.BuildManifestURL(imageName, tag)
+	manifestURL, err := env.builder.BuildManifestURL(imageName, tag)
 	if err != nil {
 		t.Fatalf("unexpected error getting manifest url: %v", err)
 	}
@@ -260,7 +220,7 @@ func TestManifestAPI(t *testing.T) {
 	checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound)
 	checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown)
 
-	tagsURL, err := builder.BuildTagsURL(imageName)
+	tagsURL, err := env.builder.BuildTagsURL(imageName)
 	if err != nil {
 		t.Fatalf("unexpected error building tags url: %v", err)
 	}
@@ -324,13 +284,13 @@ func TestManifestAPI(t *testing.T) {
 		expectedLayers[dgst] = rs
 		unsignedManifest.FSLayers[i].BlobSum = dgst
 
-		uploadURLBase := startPushLayer(t, builder, imageName)
-		pushLayer(t, builder, imageName, dgst, uploadURLBase, rs)
+		uploadURLBase := startPushLayer(t, env.builder, imageName)
+		pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs)
 	}
 
 	// -------------------
 	// Push the signed manifest with all layers pushed.
-	signedManifest, err := manifest.Sign(unsignedManifest, pk)
+	signedManifest, err := manifest.Sign(unsignedManifest, env.pk)
 	if err != nil {
 		t.Fatalf("unexpected error signing manifest: %v", err)
 	}
@@ -386,6 +346,46 @@ func TestManifestAPI(t *testing.T) {
 	}
 }
 
+type testEnv struct {
+	pk      libtrust.PrivateKey
+	ctx     context.Context
+	config  configuration.Configuration
+	app     *App
+	server  *httptest.Server
+	builder *v2.URLBuilder
+}
+
+func newTestEnv(t *testing.T) *testEnv {
+	ctx := context.Background()
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+		},
+	}
+
+	app := NewApp(ctx, config)
+	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+
+	if err != nil {
+		t.Fatalf("error creating url builder: %v", err)
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	return &testEnv{
+		pk:      pk,
+		ctx:     ctx,
+		config:  config,
+		app:     app,
+		server:  server,
+		builder: builder,
+	}
+}
+
 func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response {
 	var body []byte
 	if sm, ok := v.(*manifest.SignedManifest); ok {
diff --git a/docs/app.go b/docs/app.go
index 6e5480c3..f40d35ef 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -7,10 +7,10 @@ import (
 	"os"
 
 	"code.google.com/p/go-uuid/uuid"
-	log "github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/api/v2"
 	"github.com/docker/distribution/auth"
 	"github.com/docker/distribution/configuration"
+	ctxu "github.com/docker/distribution/context"
 	"github.com/docker/distribution/storage"
 	"github.com/docker/distribution/storage/notifications"
 	"github.com/docker/distribution/storagedriver"
@@ -23,6 +23,7 @@ import (
 // on this object that will be accessible from all requests. Any writable
 // fields should be protected.
 type App struct {
+	context.Context
 	Config configuration.Configuration
 
 	// InstanceID is a unique id assigned to the application on each creation.
@@ -43,16 +44,30 @@ type App struct {
 	layerHandler storage.LayerHandler // allows dispatch of layer serving to external provider
 }
 
+// Value intercepts calls to context.Context.Value, returning the current
+// app id, if requested.
+func (app *App) Value(key interface{}) interface{} {
+	switch key {
+	case "app.id":
+		return app.InstanceID
+	}
+
+	return app.Context.Value(key)
+}
+
 // NewApp takes a configuration and returns a configured app, ready to serve
 // requests. The app only implements ServeHTTP and can be wrapped in other
 // handlers accordingly.
-func NewApp(configuration configuration.Configuration) *App {
+func NewApp(ctx context.Context, configuration configuration.Configuration) *App {
 	app := &App{
 		Config:     configuration,
+		Context:    ctx,
 		InstanceID: uuid.New(),
 		router:     v2.Router(),
 	}
 
+	app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "app.id"))
+
 	// Register the handler dispatchers.
 	app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
 		return http.HandlerFunc(apiBase)
@@ -118,11 +133,11 @@ func (app *App) configureEvents(configuration *configuration.Configuration) {
 	var sinks []notifications.Sink
 	for _, endpoint := range configuration.Notifications.Endpoints {
 		if endpoint.Disabled {
-			log.Infof("endpoint %s disabled, skipping", endpoint.Name)
+			ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name)
 			continue
 		}
 
-		log.Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers)
+		ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers)
 		endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{
 			Timeout:   endpoint.Timeout,
 			Threshold: endpoint.Threshold,
@@ -190,27 +205,29 @@ func (ssrw *singleStatusResponseWriter) WriteHeader(status int) {
 	ssrw.ResponseWriter.WriteHeader(status)
 }
 
-// WithRequest adds an http request to the given context and returns
-// a new context with an "http.request" value.
-func WithRequest(ctx context.Context, r *http.Request) context.Context {
-	return context.WithValue(ctx, "http.request", r)
+func (ssrw *singleStatusResponseWriter) Flush() {
+	if flusher, ok := ssrw.ResponseWriter.(http.Flusher); ok {
+		flusher.Flush()
+	}
 }
 
 // dispatcher returns a handler that constructs a request specific context and
 // handler, using the dispatch factory function.
 func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		context := app.context(r)
+		context := app.context(w, r)
 
-		if err := app.authorized(w, r, context, context.vars["name"]); err != nil {
+		defer func() {
+			ctxu.GetResponseLogger(context).Infof("response completed")
+		}()
+
+		if err := app.authorized(w, r, context); err != nil {
 			return
 		}
 
 		// decorate the authorized repository with an event bridge.
 		context.Repository = notifications.Listen(
 			context.Repository, app.eventBridge(context, r))
-
-		context.log = log.WithField("name", context.Repository.Name())
 		handler := dispatch(context, r)
 
 		ssrw := &singleStatusResponseWriter{ResponseWriter: w}
@@ -230,24 +247,34 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 
 // context constructs the context object for the application. This only be
 // called once per request.
-func (app *App) context(r *http.Request) *Context {
-	vars := mux.Vars(r)
+func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
+	ctx := ctxu.WithRequest(app, r)
+	ctx, w = ctxu.WithResponseWriter(ctx, w)
+	ctx = ctxu.WithVars(ctx, r)
+	ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
+	ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx,
+		"vars.name",
+		"vars.tag",
+		"vars.digest",
+		"vars.tag",
+		"vars.uuid"))
+
 	context := &Context{
 		App:        app,
-		RequestID:  uuid.New(),
+		Context:    ctx,
 		urlBuilder: v2.NewURLBuilderFromRequest(r),
 	}
 
-	// Store vars for underlying handlers.
-	context.vars = vars
-
 	return context
 }
 
 // authorized checks if the request can proceed with access to the requested
 // repository. If it succeeds, the repository will be available on the
 // context. An error will be if access is not available.
-func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context, repo string) error {
+func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
+	ctxu.GetLogger(context).Debug("authorizing request")
+	repo := getName(context)
+
 	if app.accessController == nil {
 		// No access controller, so we simply provide access.
 		context.Repository = app.registry.Repository(repo)
@@ -308,7 +335,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 		}
 	}
 
-	authCtx, err := app.accessController.Authorized(WithRequest(nil, r), accessRecords...)
+	ctx, err := app.accessController.Authorized(context.Context, accessRecords...)
 	if err != nil {
 		switch err := err.(type) {
 		case auth.Challenge:
@@ -322,16 +350,14 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 			// the configuration or whatever is backing the access
 			// controller. Just return a bad request with no information
 			// to avoid exposure. The request should not proceed.
-			context.log.Errorf("error checking authorization: %v", err)
+			ctxu.GetLogger(context).Errorf("error checking authorization: %v", err)
 			w.WriteHeader(http.StatusBadRequest)
 		}
 
 		return err
 	}
 
-	// The authorized context should contain an auth.UserInfo
-	// object. If it doesn't, just use the zero value for now.
-	context.AuthUserInfo, _ = authCtx.Value("auth.user").(auth.UserInfo)
+	context.Context = ctx
 
 	// At this point, the request should have access to the repository under
 	// the requested operation. Make is available on the context.
@@ -345,9 +370,9 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 
 // eventBridge returns a bridge for the current request, configured with the
 // correct actor and source.
 func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener {
 	actor := notifications.ActorRecord{
-		Name: ctx.AuthUserInfo.Name,
+		Name: getUserName(ctx, r),
 	}
-	request := notifications.NewRequestRecord(ctx.RequestID, r)
+	request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r)
 
 	return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink)
 }
diff --git a/docs/app_test.go b/docs/app_test.go
index d49c7bbd..9b106575 100644
--- a/docs/app_test.go
+++ b/docs/app_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/docker/distribution/configuration"
 	"github.com/docker/distribution/storage"
 	"github.com/docker/distribution/storagedriver/inmemory"
+	"golang.org/x/net/context"
 )
 
 // TestAppDispatcher builds an application with a test dispatcher and ensures
@@ -22,6 +23,7 @@ func TestAppDispatcher(t *testing.T) {
 	driver := inmemory.New()
 	app := &App{
 		Config:   configuration.Configuration{},
+		Context:  context.Background(),
 		router:   v2.Router(),
 		driver:   driver,
 		registry: storage.NewRegistryWithDriver(driver),
@@ -37,19 +39,19 @@ func TestAppDispatcher(t *testing.T) {
 	varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc {
 		return func(ctx *Context, r *http.Request) http.Handler {
 			// Always checks the same name context
-			if ctx.Repository.Name() != ctx.vars["name"] {
+			if ctx.Repository.Name() != getName(ctx) {
 				t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar")
 			}
 
 			// Check that we have all that is expected
 			for expectedK, expectedV := range expectedVars {
-				if ctx.vars[expectedK] != expectedV {
-					t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.vars[expectedK], expectedV)
+				if ctx.Value(expectedK) != expectedV {
+					t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV)
 				}
 			}
 
 			// Check that we only have variables that are expected
-			for k, v := range ctx.vars {
+			for k, v := range ctx.Value("vars").(map[string]string) {
 				_, ok := expectedVars[k]
 
 				if !ok { // name is checked on context
@@ -135,6 +137,7 @@ func TestAppDispatcher(t *testing.T) {
 // TestNewApp covers the creation of an application via NewApp with a
 // configuration.
 func TestNewApp(t *testing.T) {
+	ctx := context.Background()
 	config := configuration.Configuration{
 		Storage: configuration.Storage{
 			"inmemory": nil,
@@ -152,7 +155,7 @@ func TestNewApp(t *testing.T) {
 	// Mostly, with this test, given a sane configuration, we are simply
 	// ensuring that NewApp doesn't panic. We might want to tweak this
 	// behavior.
-	app := NewApp(config)
+	app := NewApp(ctx, config)
 	server := httptest.NewServer(app)
 	builder, err := v2.NewURLBuilderFromString(server.URL)
 
diff --git a/docs/basicauth.go b/docs/basicauth.go
new file mode 100644
index 00000000..55794ee3
--- /dev/null
+++ b/docs/basicauth.go
@@ -0,0 +1,11 @@
+// +build go1.4
+
+package registry
+
+import (
+	"net/http"
+)
+
+func basicAuth(r *http.Request) (username, password string, ok bool) {
+	return r.BasicAuth()
+}
diff --git a/docs/basicauth_prego14.go b/docs/basicauth_prego14.go
new file mode 100644
index 00000000..dc563135
--- /dev/null
+++ b/docs/basicauth_prego14.go
@@ -0,0 +1,41 @@
+// +build !go1.4
+
+package registry
+
+import (
+	"encoding/base64"
+	"net/http"
+	"strings"
+)
+
+// NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we
+// can compile on go1.3 and earlier.
+
+// BasicAuth returns the username and password provided in the request's
+// Authorization header, if the request uses HTTP Basic Authentication.
+// See RFC 2617, Section 2.
+func basicAuth(r *http.Request) (username, password string, ok bool) {
+	auth := r.Header.Get("Authorization")
+	if auth == "" {
+		return
+	}
+	return parseBasicAuth(auth)
+}
+
+// parseBasicAuth parses an HTTP Basic Authentication string.
+// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
+func parseBasicAuth(auth string) (username, password string, ok bool) {
+	if !strings.HasPrefix(auth, "Basic ") {
+		return
+	}
+	c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
+	if err != nil {
+		return
+	}
+	cs := string(c)
+	s := strings.IndexByte(cs, ':')
+	if s < 0 {
+		return
+	}
+	return cs[:s], cs[s+1:], true
+}
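The backported helper can be exercised end to end with the standard library alone. A small usage sketch, under the assumption of a go1.4 toolchain where req.BasicAuth is available natively:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        // Build a request the way a registry client would, attaching basic
        // credentials to the Authorization header.
        req, err := http.NewRequest("GET", "https://registry.example.com/v2/", nil)
        if err != nil {
            panic(err)
        }
        req.SetBasicAuth("Aladdin", "open sesame")

        // On go1.4 and later this is equivalent to the basicAuth helper
        // above; the parseBasicAuth backport exists only for older
        // toolchains.
        username, password, ok := req.BasicAuth()
        fmt.Println(username, password, ok) // Aladdin open sesame true
    }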
diff --git a/docs/context.go b/docs/context.go
index 150e5de6..7c4dbb02 100644
--- a/docs/context.go
+++ b/docs/context.go
@@ -1,10 +1,14 @@
 package registry
 
 import (
-	"github.com/Sirupsen/logrus"
+	"fmt"
+	"net/http"
+
 	"github.com/docker/distribution/api/v2"
-	"github.com/docker/distribution/auth"
+	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/storage"
+	"golang.org/x/net/context"
 )
 
 // Context should contain the request specific context for use in across
@@ -13,9 +17,7 @@ import (
 type Context struct {
 	// App points to the application structure that created this context.
 	*App
-
-	// RequestID is the unique id of the request.
-	RequestID string
+	context.Context
 
 	// Repository is the repository for the current request. All requests
 	// should be scoped to a single repository. This field may be nil.
@@ -26,15 +28,63 @@ type Context struct {
 	// handler *must not* start the response via http.ResponseWriter.
 	Errors v2.Errors
 
-	// AuthUserInfo contains information about an authorized client.
-	AuthUserInfo auth.UserInfo
-
-	// vars contains the extracted gorilla/mux variables that can be used for
-	// assignment.
-	vars map[string]string
-
-	// log provides a context specific logger.
-	log *logrus.Entry
-
 	urlBuilder *v2.URLBuilder
+
+	// TODO(stevvooe): The goal is to completely factor this context and
+	// dispatching out of the web application. Ideally, we should lean on
+	// context.Context for injection of these resources.
+}
+
+// Value overrides context.Context.Value to ensure that calls are routed to
+// correct context.
+func (ctx *Context) Value(key interface{}) interface{} {
+	return ctx.Context.Value(key)
+}
+
+func getName(ctx context.Context) (name string) {
+	return ctxu.GetStringValue(ctx, "vars.name")
+}
+
+func getTag(ctx context.Context) (tag string) {
+	return ctxu.GetStringValue(ctx, "vars.tag")
+}
+
+var errDigestNotAvailable = fmt.Errorf("digest not available in context")
+
+func getDigest(ctx context.Context) (dgst digest.Digest, err error) {
+	dgstStr := ctxu.GetStringValue(ctx, "vars.digest")
+
+	if dgstStr == "" {
+		ctxu.GetLogger(ctx).Errorf("digest not available")
+		return "", errDigestNotAvailable
+	}
+
+	d, err := digest.ParseDigest(dgstStr)
+	if err != nil {
+		ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err)
+		return "", err
+	}
+
+	return d, nil
+}
+
+func getUploadUUID(ctx context.Context) (uuid string) {
+	return ctxu.GetStringValue(ctx, "vars.uuid")
+}
+
+// getUserName attempts to resolve a username from the context and request. If
+// a username cannot be resolved, the empty string is returned.
+func getUserName(ctx context.Context, r *http.Request) string {
+	username := ctxu.GetStringValue(ctx, "auth.user.name")
+
+	// Fallback to request user with basic auth
+	if username == "" {
+		var ok bool
+		uname, _, ok := basicAuth(r)
+		if ok {
+			username = uname
+		}
+	}
+
+	return username
+}
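The getters above all reduce to pulling typed values out of the request context. A minimal sketch of that pattern, with GetStringValue reimplemented locally as an assumption about what the ctxu helper does:

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    // GetStringValue pulls a string out of the context, returning "" when
    // the key is missing or holds another type, so handlers stay terse.
    func GetStringValue(ctx context.Context, key string) string {
        value, _ := ctx.Value(key).(string)
        return value
    }

    func main() {
        // A dispatcher would seed the context from the mux route variables.
        ctx := context.WithValue(context.Background(), "vars.name", "foo/bar")

        fmt.Println("name:", GetStringValue(ctx, "vars.name")) // name: foo/bar
        fmt.Println("tag:", GetStringValue(ctx, "vars.tag"))   // tag:
    }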
diff --git a/docs/images.go b/docs/images.go
index db6bd705..c44b0b21 100644
--- a/docs/images.go
+++ b/docs/images.go
@@ -6,6 +6,7 @@ import (
 	"net/http"
 
 	"github.com/docker/distribution/api/v2"
+	ctxu "github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/storage"
@@ -17,11 +18,9 @@ import (
 func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {
 	imageManifestHandler := &imageManifestHandler{
 		Context: ctx,
-		Tag:     ctx.vars["tag"],
+		Tag:     getTag(ctx),
 	}
 
-	imageManifestHandler.log = imageManifestHandler.log.WithField("tag", imageManifestHandler.Tag)
-
 	return handlers.MethodHandler{
 		"GET": http.HandlerFunc(imageManifestHandler.GetImageManifest),
 		"PUT": http.HandlerFunc(imageManifestHandler.PutImageManifest),
@@ -38,6 +37,7 @@ type imageManifestHandler struct {
 
 // GetImageManifest fetches the image manifest from the storage backend, if it exists.
 func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(imh).Debug("GetImageManifest")
 	manifests := imh.Repository.Manifests()
 
 	manifest, err := manifests.Get(imh.Tag)
@@ -54,6 +54,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http
 
 // PutImageManifest validates and stores and image in the registry.
 func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(imh).Debug("PutImageManifest")
 	manifests := imh.Repository.Manifests()
 	dec := json.NewDecoder(r.Body)
 
@@ -98,6 +99,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 
 // DeleteImageManifest removes the image with the given tag from the registry.
 func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(imh).Debug("DeleteImageManifest")
 	manifests := imh.Repository.Manifests()
 	if err := manifests.Delete(imh.Tag); err != nil {
 		switch err := err.(type) {
diff --git a/docs/layer.go b/docs/layer.go
index bea1cc8b..10569465 100644
--- a/docs/layer.go
+++ b/docs/layer.go
@@ -4,6 +4,7 @@ import (
 	"net/http"
 
 	"github.com/docker/distribution/api/v2"
+	ctxu "github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/storage"
 	"github.com/gorilla/handlers"
@@ -11,9 +12,16 @@ import (
 
 // layerDispatcher uses the request context to build a layerHandler.
 func layerDispatcher(ctx *Context, r *http.Request) http.Handler {
-	dgst, err := digest.ParseDigest(ctx.vars["digest"])
-
+	dgst, err := getDigest(ctx)
 	if err != nil {
+
+		if err == errDigestNotAvailable {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusNotFound)
+				ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err)
+			})
+		}
+
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err)
 		})
@@ -24,8 +32,6 @@ func layerDispatcher(ctx *Context, r *http.Request) http.Handler {
 		Digest: dgst,
 	}
 
-	layerHandler.log = layerHandler.log.WithField("digest", dgst)
-
 	return handlers.MethodHandler{
 		"GET":  http.HandlerFunc(layerHandler.GetLayer),
 		"HEAD": http.HandlerFunc(layerHandler.GetLayer),
@@ -42,6 +48,7 @@ type layerHandler struct {
 
 // GetLayer fetches the binary data from backend storage returns it in the
 // response.
 func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
+	ctxu.GetLogger(lh).Debug("GetImageLayer")
 	layers := lh.Repository.Layers()
 	layer, err := layers.Fetch(lh.Digest)
 
diff --git a/docs/layerupload.go b/docs/layerupload.go
index cfce98f3..f30bb3aa 100644
--- a/docs/layerupload.go
+++ b/docs/layerupload.go
@@ -7,8 +7,8 @@ import (
 	"net/url"
 	"os"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/api/v2"
+	ctxu "github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/storage"
 	"github.com/gorilla/handlers"
@@ -19,7 +19,7 @@ import (
 func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 	luh := &layerUploadHandler{
 		Context: ctx,
-		UUID:    ctx.vars["uuid"],
+		UUID:    getUploadUUID(ctx),
 	}
 
 	handler := http.Handler(handlers.MethodHandler{
@@ -33,12 +33,10 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 	})
 
 	if luh.UUID != "" {
-		luh.log = luh.log.WithField("uuid", luh.UUID)
-
 		state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
 		if err != nil {
 			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctx.log.Infof("error resolving upload: %v", err)
+				ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
 				w.WriteHeader(http.StatusBadRequest)
 				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
 			})
@@ -47,7 +45,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 
 		if state.Name != ctx.Repository.Name() {
 			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctx.log.Infof("mismatched repository name in upload state: %q != %q", state.Name, luh.Repository.Name())
+				ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, luh.Repository.Name())
 				w.WriteHeader(http.StatusBadRequest)
 				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
 			})
@@ -55,7 +53,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 
 		if state.UUID != luh.UUID {
 			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-				ctx.log.Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID)
+				ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID)
 				w.WriteHeader(http.StatusBadRequest)
 				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
 			})
@@ -64,7 +62,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 		layers := ctx.Repository.Layers()
 		upload, err := layers.Resume(luh.UUID)
 		if err != nil {
-			ctx.log.Errorf("error resolving upload: %v", err)
+			ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
 			if err == storage.ErrLayerUploadUnknown {
 				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 					w.WriteHeader(http.StatusNotFound)
@@ -86,7 +84,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 		// start over.
 		if nn, err := upload.Seek(luh.State.Offset, os.SEEK_SET); err != nil {
 			defer upload.Close()
-			ctx.log.Infof("error seeking layer upload: %v", err)
+			ctxu.GetLogger(ctx).Infof("error seeking layer upload: %v", err)
 			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusBadRequest)
 				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
@@ -94,7 +92,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
 			})
 		} else if nn != luh.State.Offset {
 			defer upload.Close()
-			ctx.log.Infof("seek to wrong offest: %d != %d", nn, luh.State.Offset)
+			ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, luh.State.Offset)
 			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusBadRequest)
 				luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
@@ -202,7 +200,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *
 			w.WriteHeader(http.StatusBadRequest)
 			luh.Errors.Push(v2.ErrorCodeDigestInvalid, err)
 		default:
-			luh.log.Errorf("unknown error completing upload: %#v", err)
+			ctxu.GetLogger(luh).Errorf("unknown error completing upload: %#v", err)
 			w.WriteHeader(http.StatusInternalServerError)
 			luh.Errors.Push(v2.ErrorCodeUnknown, err)
 		}
@@ -210,7 +208,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *
 		// Clean up the backend layer data if there was an error.
 		if err := luh.Upload.Cancel(); err != nil {
 			// If the cleanup fails, all we can do is observe and report.
-			luh.log.Errorf("error canceling upload after error: %v", err)
+			ctxu.GetLogger(luh).Errorf("error canceling upload after error: %v", err)
 		}
 
 		return
@@ -238,7 +236,7 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.
 	}
 
 	if err := luh.Upload.Cancel(); err != nil {
-		luh.log.Errorf("error encountered canceling upload: %v", err)
+		ctxu.GetLogger(luh).Errorf("error encountered canceling upload: %v", err)
 		w.WriteHeader(http.StatusInternalServerError)
 		luh.Errors.PushErr(err)
 	}
@@ -253,7 +251,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt
 
 	offset, err := luh.Upload.Seek(0, os.SEEK_CUR)
 	if err != nil {
-		luh.log.Errorf("unable get current offset of layer upload: %v", err)
+		ctxu.GetLogger(luh).Errorf("unable to get current offset of layer upload: %v", err)
 		return err
 	}
 
@@ -265,7 +263,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt
 
 	token, err := hmacKey(luh.Config.HTTP.Secret).packUploadState(luh.State)
 	if err != nil {
-		logrus.Infof("error building upload state token: %s", err)
+		ctxu.GetLogger(luh).Infof("error building upload state token: %s", err)
 		return err
 	}
 
@@ -275,7 +273,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt
 		"_state": []string{token},
 	})
 	if err != nil {
-		logrus.Infof("error building upload url: %s", err)
+		ctxu.GetLogger(luh).Infof("error building upload url: %s", err)
 		return err
 	}
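The hmacKey packUploadState and unpackUploadState calls above protect the upload state that round-trips through the client in the _state query parameter. Below is a hedged sketch of the signing half; the uploadState fields and token layout are assumptions for illustration, not the series' actual hmac.go.

    package main

    import (
        "crypto/hmac"
        "crypto/sha256"
        "encoding/base64"
        "encoding/json"
        "fmt"
    )

    // uploadState carries enough information to resume a layer upload; the
    // field set here is illustrative.
    type uploadState struct {
        Name   string
        UUID   string
        Offset int64
    }

    // packUploadState serializes the state and prepends an HMAC so the
    // value can round-trip through an untrusted client unmodified.
    func packUploadState(secret string, state uploadState) (string, error) {
        p, err := json.Marshal(state)
        if err != nil {
            return "", err
        }

        mac := hmac.New(sha256.New, []byte(secret))
        mac.Write(p)
        return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
    }

    func main() {
        token, err := packUploadState("registry-secret", uploadState{
            Name: "foo/bar", UUID: "abc-123", Offset: 1024,
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("_state token:", token)
    }

Verification would recompute the HMAC over the decoded payload and compare the two with hmac.Equal before trusting the offset.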
From 9bde7d9835c583cbfcacbadd9a4725ec703b1a0a Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 9 Feb 2015 14:44:58 -0800
Subject: [PATCH 016/501] Integrate context with storage package

This changeset integrates context with the storage package. Debug
messages have been added to exported methods. Existing log messages
will now include contextual details through logger fields to aid in
debugging.

This integration focuses on logging and may be followed up with a
metric-oriented change in the future.

Signed-off-by: Stephen J Day
---
 docs/app.go | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/docs/app.go b/docs/app.go
index f40d35ef..d2f9e2d9 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -227,7 +227,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 
 		// decorate the authorized repository with an event bridge.
 		context.Repository = notifications.Listen(
-			context.Repository, app.eventBridge(context, r))
+			app.registry.Repository(context, getName(context)),
+			app.eventBridge(context, r))
 		handler := dispatch(context, r)
 
 		ssrw := &singleStatusResponseWriter{ResponseWriter: w}
@@ -276,9 +277,6 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 	repo := getName(context)
 
 	if app.accessController == nil {
-		// No access controller, so we simply provide access.
-		context.Repository = app.registry.Repository(repo)
-
 		return nil // access controller is not enabled.
 	}
 
@@ -357,12 +355,11 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 		return err
 	}
 
+	// TODO(stevvooe): This pattern needs to be cleaned up a bit. One context
+	// should be replaced by another, rather than replacing the context on a
+	// mutable object.
 	context.Context = ctx
 
-	// At this point, the request should have access to the repository under
-	// the requested operation. Make is available on the context.
-	context.Repository = app.registry.Repository(repo)
-
 	return nil
 }
 

From 287e11e1d494eb32bb7c13fe5ada2ca0dfbfc782 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 10 Feb 2015 15:19:02 -0800
Subject: [PATCH 017/501] Correctly return when repo name is not available

The branch that executes after a failed request authorization due to a
missing repo name now correctly returns an error. This is somewhat
superficial, since the response would have already been executed,
although unintended repository operations may have occurred.

Documentation and comments have also been updated to be in line with
surrounding changes.

Signed-off-by: Stephen J Day
---
 docs/app.go | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/docs/app.go b/docs/app.go
index d2f9e2d9..817373f2 100644
--- a/docs/app.go
+++ b/docs/app.go
@@ -222,6 +222,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 		}()
 
 		if err := app.authorized(w, r, context); err != nil {
+			ctxu.GetLogger(context).Errorf("error authorizing context: %v", err)
 			return
 		}
 
@@ -270,8 +271,8 @@ func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
 }
 
 // authorized checks if the request can proceed with access to the requested
-// repository. If it succeeds, the repository will be available on the
-// context. An error will be if access is not available.
+// repository. If it succeeds, the context may access the requested
+// repository. An error will be returned if access is not available.
 func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
 	ctxu.GetLogger(context).Debug("authorizing request")
 	repo := getName(context)
@@ -319,17 +320,19 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 
 		route := mux.CurrentRoute(r)
 		if route == nil || route.GetName() != v2.RouteNameBase {
-			// For this to be properly secured, context.Name must always be set
-			// for a resource that may make a modification. The only condition
-			// under which name is not set and we still allow access is when the
-			// base route is accessed. This section prevents us from making that
-			// mistake elsewhere in the code, allowing any operation to proceed.
+			// For this to be properly secured, repo must always be set for a
+			// resource that may make a modification. The only condition under
+			// which name is not set and we still allow access is when the
+			// base route is accessed. This section prevents us from making
+			// that mistake elsewhere in the code, allowing any operation to
+			// proceed.
 			w.Header().Set("Content-Type", "application/json; charset=utf-8")
 			w.WriteHeader(http.StatusForbidden)
 
 			var errs v2.Errors
 			errs.Push(v2.ErrorCodeUnauthorized)
 			serveJSON(w, errs)
+			return fmt.Errorf("forbidden: no repository name")
 		}
 	}
 

From 54ae545ed3cc5c95c46ff996f3b6f541ff6aaacc Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 10 Feb 2015 17:25:40 -0800
Subject: [PATCH 018/501] Move registry package into handler package

The goal is to free up the distribution/registry package to include
common registry types. This moves the webapp definitions out of the way
to allow for this change in the future.

Signed-off-by: Stephen J Day
---
 docs/doc.go                              | 3 +++
 docs/{ => handlers}/api_test.go          | 2 +-
 docs/{ => handlers}/app.go               | 2 +-
 docs/{ => handlers}/app_test.go          | 2 +-
 docs/{ => handlers}/basicauth.go         | 2 +-
 docs/{ => handlers}/basicauth_prego14.go | 2 +-
 docs/{ => handlers}/context.go           | 2 +-
 docs/{ => handlers}/helpers.go           | 2 +-
 docs/{ => handlers}/hmac.go              | 2 +-
 docs/{ => handlers}/hmac_test.go         | 2 +-
 docs/{ => handlers}/images.go            | 2 +-
 docs/{ => handlers}/layer.go             | 2 +-
 docs/{ => handlers}/layerupload.go       | 2 +-
 docs/{ => handlers}/tags.go              | 2 +-
 14 files changed, 16 insertions(+), 13 deletions(-)
 create mode 100644 docs/doc.go
 rename docs/{ => handlers}/api_test.go (99%)
 rename docs/{ => handlers}/app.go (99%)
 rename docs/{ => handlers}/app_test.go (99%)
 rename docs/{ => handlers}/basicauth.go (88%)
 rename docs/{ => handlers}/basicauth_prego14.go (98%)
 rename docs/{ => handlers}/context.go (99%)
 rename docs/{ => handlers}/helpers.go (97%)
 rename docs/{ => handlers}/hmac.go (99%)
 rename docs/{ => handlers}/hmac_test.go (99%)
 rename docs/{ => handlers}/images.go (99%)
 rename docs/{ => handlers}/layer.go (99%)
 rename docs/{ => handlers}/layerupload.go (99%)
 rename docs/{ => handlers}/tags.go (98%)

diff --git a/docs/doc.go b/docs/doc.go
new file mode 100644
index 00000000..5049dae3
--- /dev/null
+++ b/docs/doc.go
@@ -0,0 +1,3 @@
+// Package registry is a placeholder package for registry interface
+// destinations and utilities.
+package registry
diff --git a/docs/api_test.go b/docs/handlers/api_test.go
similarity index 99%
rename from docs/api_test.go
rename to docs/handlers/api_test.go
index 5e3bd72c..1d1173a9 100644
--- a/docs/api_test.go
+++ b/docs/handlers/api_test.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"bytes"
diff --git a/docs/app.go b/docs/handlers/app.go
similarity index 99%
rename from docs/app.go
rename to docs/handlers/app.go
index 817373f2..e49144e2 100644
--- a/docs/app.go
+++ b/docs/handlers/app.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"fmt"
diff --git a/docs/app_test.go b/docs/handlers/app_test.go
similarity index 99%
rename from docs/app_test.go
rename to docs/handlers/app_test.go
index 9b106575..927f40a4 100644
--- a/docs/app_test.go
+++ b/docs/handlers/app_test.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"encoding/json"
diff --git a/docs/basicauth.go b/docs/handlers/basicauth.go
similarity index 88%
rename from docs/basicauth.go
rename to docs/handlers/basicauth.go
index 55794ee3..8727a3cd 100644
--- a/docs/basicauth.go
+++ b/docs/handlers/basicauth.go
@@ -1,6 +1,6 @@
 // +build go1.4
 
-package registry
+package handlers
 
 import (
 	"net/http"
diff --git a/docs/basicauth_prego14.go b/docs/handlers/basicauth_prego14.go
similarity index 98%
rename from docs/basicauth_prego14.go
rename to docs/handlers/basicauth_prego14.go
index dc563135..6cf10a25 100644
--- a/docs/basicauth_prego14.go
+++ b/docs/handlers/basicauth_prego14.go
@@ -1,6 +1,6 @@
 // +build !go1.4
 
-package registry
+package handlers
 
 import (
 	"encoding/base64"
diff --git a/docs/context.go b/docs/handlers/context.go
similarity index 99%
rename from docs/context.go
rename to docs/handlers/context.go
index 7c4dbb02..8f277595 100644
--- a/docs/context.go
+++ b/docs/handlers/context.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"fmt"
diff --git a/docs/helpers.go b/docs/handlers/helpers.go
similarity index 97%
rename from docs/helpers.go
rename to docs/handlers/helpers.go
index 6bcb4ae8..f2879137 100644
--- a/docs/helpers.go
+++ b/docs/handlers/helpers.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"encoding/json"
diff --git a/docs/hmac.go b/docs/handlers/hmac.go
similarity index 99%
rename from docs/hmac.go
rename to docs/handlers/hmac.go
index d2470087..e17ececa 100644
--- a/docs/hmac.go
+++ b/docs/handlers/hmac.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"crypto/hmac"
diff --git a/docs/hmac_test.go b/docs/handlers/hmac_test.go
similarity index 99%
rename from docs/hmac_test.go
rename to docs/handlers/hmac_test.go
index 5ad60f61..cce2cd49 100644
--- a/docs/hmac_test.go
+++ b/docs/handlers/hmac_test.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import "testing"
 
diff --git a/docs/images.go b/docs/handlers/images.go
similarity index 99%
rename from docs/images.go
rename to docs/handlers/images.go
index c44b0b21..c26a2239 100644
--- a/docs/images.go
+++ b/docs/handlers/images.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"encoding/json"
diff --git a/docs/layer.go b/docs/handlers/layer.go
similarity index 99%
rename from docs/layer.go
rename to docs/handlers/layer.go
index 10569465..31d24b86 100644
--- a/docs/layer.go
+++ b/docs/handlers/layer.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"net/http"
diff --git a/docs/layerupload.go b/docs/handlers/layerupload.go
similarity index 99%
rename from docs/layerupload.go
rename to docs/handlers/layerupload.go
index f30bb3aa..a15e274a 100644
--- a/docs/layerupload.go
+++ b/docs/handlers/layerupload.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"fmt"
diff --git a/docs/tags.go b/docs/handlers/tags.go
similarity index 98%
rename from docs/tags.go
rename to docs/handlers/tags.go
index 1f745c6a..65ffacfc 100644
--- a/docs/tags.go
+++ b/docs/handlers/tags.go
@@ -1,4 +1,4 @@
-package registry
+package handlers
 
 import (
 	"encoding/json"

From 3822e685a03027d7b4408fbcc326428e0d2432fd Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 10 Feb 2015 17:32:22 -0800
Subject: [PATCH 019/501] Move registry api definitions under registry package

Signed-off-by: Stephen J Day
---
 docs/api/v2/descriptors.go   | 1422 ++++++++++++++++++++++++++++++++++
 docs/api/v2/doc.go           |    9 +
 docs/api/v2/errors.go        |  191 +++++
 docs/api/v2/errors_test.go   |  165 ++++
 docs/api/v2/names.go         |  115 +++
 docs/api/v2/names_test.go    |   93 +++
 docs/api/v2/routes.go        |   36 +
 docs/api/v2/routes_test.go   |  198 +++++
 docs/api/v2/urls.go          |  201 +++++
 docs/api/v2/urls_test.go     |  155 ++++
 docs/handlers/api_test.go    |    2 +-
 docs/handlers/app.go         |    2 +-
 docs/handlers/app_test.go    |    2 +-
 docs/handlers/context.go     |    2 +-
 docs/handlers/images.go      |    2 +-
 docs/handlers/layer.go       |    2 +-
 docs/handlers/layerupload.go |    2 +-
 docs/handlers/tags.go        |    2 +-
 18 files changed, 2593 insertions(+), 8 deletions(-)
 create mode 100644 docs/api/v2/descriptors.go
 create mode 100644 docs/api/v2/doc.go
 create mode 100644 docs/api/v2/errors.go
 create mode 100644 docs/api/v2/errors_test.go
 create mode 100644 docs/api/v2/names.go
 create mode 100644 docs/api/v2/names_test.go
 create mode 100644 docs/api/v2/routes.go
 create mode 100644 docs/api/v2/routes_test.go
 create mode 100644 docs/api/v2/urls.go
 create mode 100644 docs/api/v2/urls_test.go

diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go
new file mode 100644
index 00000000..2c6fafd0
--- /dev/null
+++ b/docs/api/v2/descriptors.go
@@ -0,0 +1,1422 @@
+package v2
+
+import (
+	"net/http"
+	"regexp"
+
+	"github.com/docker/distribution/digest"
+)
+
+var (
+	nameParameterDescriptor = ParameterDescriptor{
+		Name:        "name",
+		Type:        "string",
+		Format:      RepositoryNameRegexp.String(),
+		Required:    true,
+		Description: `Name of the target repository.`,
+	}
+
+	tagParameterDescriptor = ParameterDescriptor{
+		Name:        "tag",
+		Type:        "string",
+		Format:      TagNameRegexp.String(),
+		Required:    true,
+		Description: `Tag of the target manifest.`,
+	}
+
+	uuidParameterDescriptor = ParameterDescriptor{
+		Name:        "uuid",
+		Type:        "opaque",
+		Required:    true,
+		Description: `A uuid identifying the upload. This field can accept almost anything.`,
+	}
+
+	digestPathParameter = ParameterDescriptor{
+		Name:        "digest",
+		Type:        "path",
+		Required:    true,
+		Format:      digest.DigestRegexp.String(),
+		Description: `Digest of desired blob.`,
+	}
+
+	hostHeader = ParameterDescriptor{
+		Name:        "Host",
+		Type:        "string",
+		Description: "Standard HTTP Host Header. Should be set to the registry host.",
Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + unauthorizedResponse = ResponseDescriptor{ + Description: "The client does not have access to the repository.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: unauthorizedErrorsBody, + }, + } + + unauthorizedResponsePush = ResponseDescriptor{ + Description: "The client does not have access to push to the repository.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: unauthorizedErrorsBody, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` + + unauthorizedErrorsBody = `{ + "errors:" [ + { + "code": "UNAUTHORIZED", + "message": "access to the requested resource is not authorized", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor + + // ErrorDescriptors provides a list of the error codes and their + // associated documentation and metadata. + ErrorDescriptors []ErrorDescriptor +}{ + RouteDescriptors: routeDescriptors, + ErrorDescriptors: errorDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. 
+ Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. Its most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particalar request. + Name string + + // Description should cover the requests purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. + Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particalar response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status recieved by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. + Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. 
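The package documentation (doc.go, later in this patch) notes that these descriptors exist so implementations and documentation generators can consume the API definition programmatically. As a minimal sketch of such a consumer (the import path is an assumption, taken from how this package is imported elsewhere in the series), a generator could simply walk APIDescriptor:

package main

import (
    "fmt"

    "github.com/docker/distribution/api/v2" // import path assumed from elsewhere in this series
)

func main() {
    // Walk the exported descriptor table and emit a plain-text outline.
    for _, route := range v2.APIDescriptor.RouteDescriptors {
        fmt.Printf("%s (%s)\n", route.Entity, route.Path)
        for _, method := range route.Methods {
            fmt.Printf("  %s: %s\n", method.Method, method.Description)
        }
    }
}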
+ Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable decription of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCodes provides a list of status under which this error + // condition may arise. If it is empty, the error condition may be seen + // for any status code. + HTTPStatusCodes []int +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The client is not authorized to access the registry.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... 
+ ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{tag:" + TagNameRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `tag`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + tagParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest idenfied by `name` and `tag`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or tag was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Description: "The named manifest is not known to the registry.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `tag`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + tagParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. 
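For reference, a small client sketch against the tags list endpoint described earlier; the registry address and repository name are placeholders, and the response struct mirrors the documented JSON body:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// tagsResponse mirrors the documented body: {"name": ..., "tags": [...]}.
type tagsResponse struct {
    Name string   `json:"name"`
    Tags []string `json:"tags"`
}

func main() {
    // Placeholder registry address and repository name.
    resp, err := http.Get("http://localhost:5000/v2/foo/bar/tags/list")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var tr tagsResponse
    if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
        panic(err)
    }
    fmt.Println(tr.Name, tr.Tags)
}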
The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have permission to push to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... + ] +}`, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `tag`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + tagParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Tag", + Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Fetch the blob identified by `name` and `digest`. 
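A sketch of a client exercising the manifest PUT described above; the registry address is a placeholder and the body is schematic only (a real manifest must carry valid fsLayers, history and signature fields per the manifestBody format):

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // Schematic, unsigned manifest body; placeholders throughout.
    manifest := []byte(`{"name": "foo/bar", "tag": "latest", "fsLayers": []}`)

    req, err := http.NewRequest("PUT",
        "http://localhost:5000/v2/foo/bar/manifests/latest", // placeholder registry
        bytes.NewReader(manifest))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/json; charset=utf-8")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Per the descriptor: 202 Accepted on success, 400 with an errors body
    // (e.g. MANIFEST_UNVERIFIED) otherwise.
    fmt.Println("status:", resp.Status)
}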
Used to fetch layers by tarsum digest.", + Methods: []MethodDescriptor{ + + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
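To make the fetch flow concrete, a sketch of retrieving and verifying a blob; the digest and registry address are placeholders, and the verifier API follows the digest package as it is used elsewhere in this series:

package main

import (
    "fmt"
    "io"
    "net/http"

    "github.com/docker/distribution/digest" // as used elsewhere in this series
)

func main() {
    // Placeholder digest (sha256 of the empty string) and registry address.
    dgst := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    url := fmt.Sprintf("http://localhost:5000/v2/foo/bar/blobs/%s", dgst)

    resp, err := http.Get(url) // the client follows a 307 redirect transparently
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Verify the body against the requested digest.
    verifier := digest.NewDigestVerifier(dgst)
    if _, err := io.Copy(verifier, resp.Body); err != nil {
        panic(err)
    }
    fmt.Println("verified:", verifier.Verified())
}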
The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + }, + }, + }, + }, + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", + Entity: "Intiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octect-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponsePush, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.", + }, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponsePush, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
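A sketch of initiating a resumable upload as specified above (placeholder registry address and repository); the returned Location header is then used verbatim for subsequent PATCH and PUT requests:

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // POST with an empty body starts a resumable upload.
    resp, err := http.Post("http://localhost:5000/v2/foo/bar/blobs/uploads/", "", nil)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusAccepted {
        fmt.Println("unexpected status:", resp.Status)
        return
    }

    // Per the descriptor: Location must be used verbatim to continue the
    // upload, and Range reports progress (0-0 at the start, since no
    // content has been received).
    fmt.Println("location:", resp.Header.Get("Location"))
    fmt.Println("range:", resp.Header.Get("Range"))
}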
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Description: "Upload a chunk of data to specified upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. 
The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + // TODO(stevvooe): Break this down into three separate requests: + // 1. Complete an upload where all data has already been sent. + // 2. Complete an upload where the entire body is in the PUT. + // 3. Complete an upload where the final, partial chunk is the body. + + Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. 
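Completing an upload per the PUT descriptor above might look like the following sketch; the upload location and digest are placeholders, and the digest query parameter is required:

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "net/url"
)

func main() {
    // Placeholder values; a real location comes from prior upload responses.
    location := "http://localhost:5000/v2/foo/bar/blobs/uploads/some-uuid"
    blob := []byte("final chunk, or the whole blob")
    dgst := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // placeholder digest

    u, err := url.Parse(location)
    if err != nil {
        panic(err)
    }
    q := u.Query()
    q.Set("digest", dgst) // required query parameter, per the descriptor
    u.RawQuery = q.Encode()

    req, err := http.NewRequest("PUT", u.String(), bytes.NewReader(blob))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/octet-stream")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // 204 No Content with the canonical Location on success.
    fmt.Println("status:", resp.Status, "location:", resp.Header.Get("Location"))
}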
The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + }, + }, +} + +// ErrorDescriptors provides a list of HTTP API Error codes that may be +// encountered when interacting with the registry API. +var errorDescriptors = []ErrorDescriptor{ + { + Code: ErrorCodeUnknown, + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + }, + { + Code: ErrorCodeUnauthorized, + Value: "UNAUTHORIZED", + Message: "access to the requested resource is not authorized", + Description: `The access controller denied access for the operation on + a resource. Often this will be accompanied by a 401 Unauthorized + response status.`, + }, + { + Code: ErrorCodeDigestInvalid, + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. 
This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + { + Code: ErrorCodeSizeInvalid, + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, + }, + { + Code: ErrorCodeNameInvalid, + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + { + Code: ErrorCodeTagInvalid, + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + { + Code: ErrorCodeNameUnknown, + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, + { + Code: ErrorCodeManifestUnknown, + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, + { + Code: ErrorCodeManifestInvalid, + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, + }, + { + Code: ErrorCodeManifestUnverified, + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, + }, + { + Code: ErrorCodeBlobUnknown, + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. 
This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + + { + Code: ErrorCodeBlobUploadUnknown, + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, + { + Code: ErrorCodeBlobUploadInvalid, + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, +} + +var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor +var idToDescriptors map[string]ErrorDescriptor + +func init() { + errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors)) + idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors)) + + for _, descriptor := range errorDescriptors { + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + } +} diff --git a/docs/api/v2/doc.go b/docs/api/v2/doc.go new file mode 100644 index 00000000..cde01195 --- /dev/null +++ b/docs/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. +package v2 diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go new file mode 100644 index 00000000..4d5d55c7 --- /dev/null +++ b/docs/api/v2/errors.go @@ -0,0 +1,191 @@ +package v2 + +import ( + "fmt" + "strings" +) + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +const ( + // ErrorCodeUnknown is a catch-all for errors not defined below. + ErrorCodeUnknown ErrorCode = iota + + // ErrorCodeUnauthorized is returned if a request is not authorized. + ErrorCodeUnauthorized + + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + // size does not match the content length. + ErrorCodeSizeInvalid + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. + ErrorCodeNameInvalid + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid + + // ErrorCodeNameUnknown when the repository name is not known. + ErrorCodeNameUnknown + + // ErrorCodeManifestUnknown returned when image manifest is unknown. + ErrorCodeManifestUnknown + + // ErrorCodeManifestInvalid returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. + ErrorCodeManifestInvalid + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verfication. 
+ ErrorCodeManifestUnverified + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. + ErrorCodeBlobUnknown + + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid +) + +// ParseErrorCode attempts to parse the error code string, returning +// ErrorCodeUnknown if the error is not known. +func ParseErrorCode(s string) ErrorCode { + desc, ok := idToDescriptors[s] + + if !ok { + return ErrorCodeUnknown + } + + return desc.Code +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail interface{} `json:"detail,omitempty"` +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", + strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), + e.Message) +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors struct { + Errors []Error `json:"errors,omitempty"` +} + +// Push pushes an error on to the error stack, with the optional detail +// argument. It is a programming error (ie panic) to push more than one +// detail at a time. +func (errs *Errors) Push(code ErrorCode, details ...interface{}) { + if len(details) > 1 { + panic("please specify zero or one detail items for this error") + } + + var detail interface{} + if len(details) > 0 { + detail = details[0] + } + + if err, ok := detail.(error); ok { + detail = err.Error() + } + + errs.PushErr(Error{ + Code: code, + Message: code.Message(), + Detail: detail, + }) +} + +// PushErr pushes an error interface onto the error stack. +func (errs *Errors) PushErr(err error) { + switch err.(type) { + case Error: + errs.Errors = append(errs.Errors, err.(Error)) + default: + errs.Errors = append(errs.Errors, Error{Message: err.Error()}) + } +} + +func (errs *Errors) Error() string { + switch errs.Len() { + case 0: + return "" + case 1: + return errs.Errors[0].Error() + default: + msg := "errors:\n" + for _, err := range errs.Errors { + msg += err.Error() + "\n" + } + return msg + } +} + +// Clear clears the errors. +func (errs *Errors) Clear() { + errs.Errors = errs.Errors[:0] +} + +// Len returns the current number of errors. 
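The Errors envelope and its Push helper, defined above, are how handlers assemble the JSON error bodies shown throughout the route descriptors. A sketch of handler-side use (the handler, status choice and detail payload are illustrative, and the import path is assumed from elsewhere in this series):

package main

import (
    "encoding/json"
    "net/http"

    "github.com/docker/distribution/api/v2" // import path assumed from elsewhere in this series
)

func manifestHandler(w http.ResponseWriter, r *http.Request) {
    var errs v2.Errors

    // Push takes the error code plus at most one optional detail item.
    errs.Push(v2.ErrorCodeManifestUnknown, map[string]string{
        "name": "foo/bar", "tag": "latest", // illustrative detail
    })

    if errs.Len() > 0 {
        // Serializes as {"errors": [{"code": "MANIFEST_UNKNOWN", ...}]}.
        w.Header().Set("Content-Type", "application/json; charset=utf-8")
        w.WriteHeader(http.StatusNotFound)
        json.NewEncoder(w).Encode(errs)
    }
}

func main() {
    http.ListenAndServe(":8080", http.HandlerFunc(manifestHandler))
}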
+func (errs *Errors) Len() int { + return len(errs.Errors) +} diff --git a/docs/api/v2/errors_test.go b/docs/api/v2/errors_test.go new file mode 100644 index 00000000..9cc831c4 --- /dev/null +++ b/docs/api/v2/errors_test.go @@ -0,0 +1,165 @@ +package v2 + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution/digest" +) + +// TestErrorCodes ensures that error code format, mappings and +// marshaling/unmarshaling. round trips are stable. +func TestErrorCodes(t *testing.T) { + for _, desc := range errorDescriptors { + if desc.Code.String() != desc.Value { + t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) + } + + if desc.Code.Message() != desc.Message { + t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message) + } + + // Serialize the error code using the json library to ensure that we + // get a string and it works round trip. + p, err := json.Marshal(desc.Code) + + if err != nil { + t.Fatalf("error marshaling error code %v: %v", desc.Code, err) + } + + if len(p) <= 0 { + t.Fatalf("expected content in marshaled before for error code %v", desc.Code) + } + + // First, unmarshal to interface and ensure we have a string. + var ecUnspecified interface{} + if err := json.Unmarshal(p, &ecUnspecified); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) + } + + if _, ok := ecUnspecified.(string); !ok { + t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified) + } + + // Now, unmarshal with the error code type and ensure they are equal + var ecUnmarshaled ErrorCode + if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { + t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) + } + + if ecUnmarshaled != desc.Code { + t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code) + } + } +} + +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +func TestErrorsManagement(t *testing.T) { + var errs Errors + + errs.Push(ErrorCodeDigestInvalid) + errs.Push(ErrorCodeBlobUnknown, + map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"}) + + p, err := json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}" + + if string(p) != expectedJSON { + t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + } + + errs.Clear() + errs.Push(ErrorCodeUnknown) + expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" + p, err = json.Marshal(errs) + + if err != nil { + t.Fatalf("error marashaling errors: %v", err) + } + + if string(p) != expectedJSON { + t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + } +} + +// TestMarshalUnmarshal ensures that api errors can round trip through json +// without losing information. 
+func TestMarshalUnmarshal(t *testing.T) { + + var errors Errors + + for _, testcase := range []struct { + description string + err Error + }{ + { + description: "unknown error", + err: Error{ + + Code: ErrorCodeUnknown, + Message: ErrorCodeUnknown.Descriptor().Message, + }, + }, + { + description: "unknown manifest", + err: Error{ + Code: ErrorCodeManifestUnknown, + Message: ErrorCodeManifestUnknown.Descriptor().Message, + }, + }, + { + description: "unknown manifest", + err: Error{ + Code: ErrorCodeBlobUnknown, + Message: ErrorCodeBlobUnknown.Descriptor().Message, + Detail: map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"}, + }, + }, + } { + fatalf := func(format string, args ...interface{}) { + t.Fatalf(testcase.description+": "+format, args...) + } + + unexpectedErr := func(err error) { + fatalf("unexpected error: %v", err) + } + + p, err := json.Marshal(testcase.err) + if err != nil { + unexpectedErr(err) + } + + var unmarshaled Error + if err := json.Unmarshal(p, &unmarshaled); err != nil { + unexpectedErr(err) + } + + if !reflect.DeepEqual(unmarshaled, testcase.err) { + fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err) + } + + // Roll everything up into an error response envelope. + errors.PushErr(testcase.err) + } + + p, err := json.Marshal(errors) + if err != nil { + t.Fatalf("unexpected error marshaling error envelope: %v", err) + } + + var unmarshaled Errors + if err := json.Unmarshal(p, &unmarshaled); err != nil { + t.Fatalf("unexpected error unmarshaling error envelope: %v", err) + } + + if !reflect.DeepEqual(unmarshaled, errors) { + t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors) + } +} diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go new file mode 100644 index 00000000..d05eeb6a --- /dev/null +++ b/docs/api/v2/names.go @@ -0,0 +1,115 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" +) + +const ( + // RepositoryNameComponentMinLength is the minimum number of characters in a + // single repository name slash-delimited component + RepositoryNameComponentMinLength = 2 + + // RepositoryNameComponentMaxLength is the maximum number of characters in a + // single repository name slash-delimited component + RepositoryNameComponentMaxLength = 30 + + // RepositoryNameMinComponents is the minimum number of slash-delimited + // components that a repository name must have + RepositoryNameMinComponents = 1 + + // RepositoryNameMaxComponents is the maximum number of slash-delimited + // components that a repository name must have + RepositoryNameMaxComponents = 5 + + // RepositoryNameTotalLengthMax is the maximum total number of characters in + // a repository name + RepositoryNameTotalLengthMax = 255 +) + +// RepositoryNameComponentRegexp restricts registtry path components names to +// start with at least two letters or numbers, with following parts able to +// separated by one period, dash or underscore. +var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) + +// RepositoryNameComponentAnchoredRegexp is the version of +// RepositoryNameComponentRegexp which must completely match the content +var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) + +// TODO(stevvooe): RepositoryName needs to be limited to some fixed length. +// Looking path prefixes and s3 limitation of 1024, this should likely be +// around 512 bytes. 256 bytes might be more manageable. 
+ +// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 1 to +// 5 path components, separated by a forward slash. +var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){0,4}` + RepositoryNameComponentRegexp.String()) + +// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. +var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) + +// TODO(stevvooe): Contribute these exports back to core, so they are shared. + +var ( + // ErrRepositoryNameComponentShort is returned when a repository name + // contains a component which is shorter than + // RepositoryNameComponentMinLength + ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength) + + // ErrRepositoryNameComponentLong is returned when a repository name + // contains a component which is longer than + // RepositoryNameComponentMaxLength + ErrRepositoryNameComponentLong = fmt.Errorf("respository name component must be %v characters or less", RepositoryNameComponentMaxLength) + + // ErrRepositoryNameMissingComponents is returned when a repository name + // contains fewer than RepositoryNameMinComponents components + ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents) + + // ErrRepositoryNameTooManyComponents is returned when a repository name + // contains more than RepositoryNameMaxComponents components + ErrRepositoryNameTooManyComponents = fmt.Errorf("repository name %v or less components", RepositoryNameMaxComponents) + + // ErrRepositoryNameLong is returned when a repository name is longer than + // RepositoryNameTotalLengthMax + ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) + + // ErrRepositoryNameComponentInvalid is returned when a repository name does + // not match RepositoryNameComponentRegexp + ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) +) + +// ValidateRespositoryName ensures the repository name is valid for use in the +// registry. This function accepts a superset of what might be accepted by +// docker core or docker hub. If the name does not pass validation, an error, +// describing the conditions, is returned. 
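The validation behavior is easiest to see by example; the following sketch exercises ValidateRespositoryName with inputs drawn from the accompanying tests (the import path is assumed from elsewhere in this series):

package main

import (
    "fmt"

    "github.com/docker/distribution/api/v2" // import path assumed from elsewhere in this series
)

func main() {
    // Expected results follow the cases in names_test.go: the first two
    // names validate, "a" is a too-short component, and "asdf$$^/aa"
    // contains an invalid component.
    for _, name := range []string{"library/ubuntu", "foo.com/bar/baz", "a", "asdf$$^/aa"} {
        if err := v2.ValidateRespositoryName(name); err != nil {
            fmt.Printf("%s: %v\n", name, err)
            continue
        }
        fmt.Printf("%s: ok\n", name)
    }
}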
+func ValidateRespositoryName(name string) error { + if len(name) > RepositoryNameTotalLengthMax { + return ErrRepositoryNameLong + } + + components := strings.Split(name, "/") + + if len(components) < RepositoryNameMinComponents { + return ErrRepositoryNameMissingComponents + } + + if len(components) > RepositoryNameMaxComponents { + return ErrRepositoryNameTooManyComponents + } + + for _, component := range components { + if len(component) < RepositoryNameComponentMinLength { + return ErrRepositoryNameComponentShort + } + + if len(component) > RepositoryNameComponentMaxLength { + return ErrRepositoryNameComponentLong + } + + if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { + return ErrRepositoryNameComponentInvalid + } + } + + return nil +} diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go new file mode 100644 index 00000000..69ba5305 --- /dev/null +++ b/docs/api/v2/names_test.go @@ -0,0 +1,93 @@ +package v2 + +import ( + "testing" +) + +func TestRepositoryNameRegexp(t *testing.T) { + for _, testcase := range []struct { + input string + err error + }{ + { + input: "short", + }, + { + input: "simple/name", + }, + { + input: "library/ubuntu", + }, + { + input: "docker/stevvooe/app", + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + err: ErrRepositoryNameTooManyComponents, + }, + { + input: "aa/aa/bb/bb/bb", + }, + { + input: "a/a/a/b/b", + err: ErrRepositoryNameComponentShort, + }, + { + input: "a/a/a/a/", + err: ErrRepositoryNameComponentShort, + }, + { + input: "foo.com/bar/baz", + }, + { + input: "blog.foo.com/bar/baz", + }, + { + input: "asdf", + }, + { + input: "asdf$$^/aa", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "aa-a/aa", + }, + { + input: "aa/aa", + }, + { + input: "a-a/a-a", + }, + { + input: "a", + err: ErrRepositoryNameComponentShort, + }, + { + input: "a-/a/a/a", + err: ErrRepositoryNameComponentInvalid, + }, + } { + + failf := func(format string, v ...interface{}) { + t.Logf(testcase.input+": "+format, v...) + t.Fail() + } + + if err := ValidateRespositoryName(testcase.input); err != testcase.err { + if testcase.err != nil { + if err != nil { + failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) + } else { + failf("expected invalid repository: %v", testcase.err) + } + } else { + if err != nil { + // Wrong error returned. + failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) + } else { + failf("unexpected error validating repository name: %v", err) + } + } + } + } +} diff --git a/docs/api/v2/routes.go b/docs/api/v2/routes.go new file mode 100644 index 00000000..ef933600 --- /dev/null +++ b/docs/api/v2/routes.go @@ -0,0 +1,36 @@ +package v2 + +import "github.com/gorilla/mux" + +// The following are definitions of the name under which all V2 routes are +// registered. These symbols can be used to look up a route based on the name. +const ( + RouteNameBase = "base" + RouteNameManifest = "manifest" + RouteNameTags = "tags" + RouteNameBlob = "blob" + RouteNameBlobUpload = "blob-upload" + RouteNameBlobUploadChunk = "blob-upload-chunk" +) + +var allEndpoints = []string{ + RouteNameManifest, + RouteNameTags, + RouteNameBlob, + RouteNameBlobUpload, + RouteNameBlobUploadChunk, +} + +// Router builds a gorilla router with named routes for the various API +// methods. This can be used directly by both server implementations and +// clients. +func Router() *mux.Router { + router := mux.NewRouter(). 
+ StrictSlash(true)
+
+ for _, descriptor := range routeDescriptors {
+ router.Path(descriptor.Path).Name(descriptor.Name)
+ }
+
+ return router
+}
diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go
new file mode 100644
index 00000000..af424616
--- /dev/null
+++ b/docs/api/v2/routes_test.go
@@ -0,0 +1,198 @@
+package v2
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/gorilla/mux"
+)
+
+type routeTestCase struct {
+ RequestURI string
+ Vars map[string]string
+ RouteName string
+ StatusCode int
+}
+
+// TestRouter registers a test handler with all the routes and ensures that
+// each route returns the expected path variables. No method verification is
+// present. This is not meant to be exhaustive, but serves as a check that
+// the expected variables are extracted.
+//
+// This may go away as the application structure comes together.
+func TestRouter(t *testing.T) {
+
+ router := Router()
+
+ testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ testCase := routeTestCase{
+ RequestURI: r.RequestURI,
+ Vars: mux.Vars(r),
+ RouteName: mux.CurrentRoute(r).GetName(),
+ }
+
+ enc := json.NewEncoder(w)
+
+ if err := enc.Encode(testCase); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ })
+
+ // Start the test server.
+ server := httptest.NewServer(router)
+
+ for _, testcase := range []routeTestCase{
+ {
+ RouteName: RouteNameBase,
+ RequestURI: "/v2/",
+ Vars: map[string]string{},
+ },
+ {
+ RouteName: RouteNameManifest,
+ RequestURI: "/v2/foo/manifests/bar",
+ Vars: map[string]string{
+ "name": "foo",
+ "tag": "bar",
+ },
+ },
+ {
+ RouteName: RouteNameManifest,
+ RequestURI: "/v2/foo/bar/manifests/tag",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ "tag": "tag",
+ },
+ },
+ {
+ RouteName: RouteNameTags,
+ RequestURI: "/v2/foo/bar/tags/list",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ },
+ },
+ {
+ RouteName: RouteNameBlob,
+ RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ "digest": "tarsum.dev+foo:abcdef0919234",
+ },
+ },
+ {
+ RouteName: RouteNameBlob,
+ RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ "digest": "sha256:abcdef0919234",
+ },
+ },
+ {
+ RouteName: RouteNameBlobUpload,
+ RequestURI: "/v2/foo/bar/blobs/uploads/",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ },
+ },
+ {
+ RouteName: RouteNameBlobUploadChunk,
+ RequestURI: "/v2/foo/bar/blobs/uploads/uuid",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ "uuid": "uuid",
+ },
+ },
+ {
+ RouteName: RouteNameBlobUploadChunk,
+ RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+ },
+ },
+ {
+ RouteName: RouteNameBlobUploadChunk,
+ RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
+ Vars: map[string]string{
+ "name": "foo/bar",
+ "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
+ },
+ },
+ {
+ // Check ambiguity: ensure we can distinguish between tags for
+ // "foo/bar/manifests/manifests" and the manifest for
+ // "foo/bar/manifests" with tag "tags".
+ RouteName: RouteNameManifest,
+ RequestURI: "/v2/foo/bar/manifests/manifests/tags",
+ Vars: map[string]string{
+ "name": "foo/bar/manifests",
+ "tag": "tags",
+ },
+ },
+ {
+ // This case presents an ambiguity between foo/bar with tag="tags"
+ // and list
tags for "foo/bar/manifest" + RouteName: RouteNameTags, + RequestURI: "/v2/foo/bar/manifests/tags/list", + Vars: map[string]string{ + "name": "foo/bar/manifests", + }, + }, + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + } { + // Register the endpoint + route := router.GetRoute(testcase.RouteName) + if route == nil { + t.Fatalf("route for name %q not found", testcase.RouteName) + } + + route.Handler(testHandler) + + u := server.URL + testcase.RequestURI + + resp, err := http.Get(u) + + if err != nil { + t.Fatalf("error issuing get request: %v", err) + } + + if testcase.StatusCode == 0 { + // Override default, zero-value + testcase.StatusCode = http.StatusOK + } + + if resp.StatusCode != testcase.StatusCode { + t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) + } + + if testcase.StatusCode != http.StatusOK { + // We don't care about json response. + continue + } + + dec := json.NewDecoder(resp.Body) + + var actualRouteInfo routeTestCase + if err := dec.Decode(&actualRouteInfo); err != nil { + t.Fatalf("error reading json response: %v", err) + } + // Needs to be set out of band + actualRouteInfo.StatusCode = resp.StatusCode + + if actualRouteInfo.RouteName != testcase.RouteName { + t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) + } + + if !reflect.DeepEqual(actualRouteInfo, testcase) { + t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) + } + } + +} diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go new file mode 100644 index 00000000..6f2fd6e8 --- /dev/null +++ b/docs/api/v2/urls.go @@ -0,0 +1,201 @@ +package v2 + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution/digest" + "github.com/gorilla/mux" +) + +// URLBuilder creates registry API urls from a single base endpoint. It can be +// used to create urls for use in a registry client or server. +// +// All urls will be created from the given base, including the api version. +// For example, if a root of "/foo/" is provided, urls generated will be fall +// under "/foo/v2/...". Most application will only provide a schema, host and +// port, such as "https://localhost:5000/". +type URLBuilder struct { + root *url.URL // url root (ie http://localhost/) + router *mux.Router +} + +// NewURLBuilder creates a URLBuilder with provided root url object. +func NewURLBuilder(root *url.URL) *URLBuilder { + return &URLBuilder{ + root: root, + router: Router(), + } +} + +// NewURLBuilderFromString workes identically to NewURLBuilder except it takes +// a string argument for the root, returning an error if it is not a valid +// url. +func NewURLBuilderFromString(root string) (*URLBuilder, error) { + u, err := url.Parse(root) + if err != nil { + return nil, err + } + + return NewURLBuilder(u), nil +} + +// NewURLBuilderFromRequest uses information from an *http.Request to +// construct the root url. 
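+//
+// For example, behind a TLS-terminating proxy that sets the conventional
+// forwarding headers, a request carrying
+//
+// X-Forwarded-Proto: https
+// X-Forwarded-Host: registry.example.com
+//
+// yields a builder rooted at "https://registry.example.com", regardless of
+// the scheme and host seen by the backend itself.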
+func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { + var scheme string + + forwardedProto := r.Header.Get("X-Forwarded-Proto") + + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case r.TLS != nil: + scheme = "https" + case len(r.URL.Scheme) > 0: + scheme = r.URL.Scheme + default: + scheme = "http" + } + + host := r.Host + forwardedHost := r.Header.Get("X-Forwarded-Host") + if len(forwardedHost) > 0 { + host = forwardedHost + } + + u := &url.URL{ + Scheme: scheme, + Host: host, + } + + return NewURLBuilder(u) +} + +// BuildBaseURL constructs a base url for the API, typically just "/v2/". +func (ub *URLBuilder) BuildBaseURL() (string, error) { + route := ub.cloneRoute(RouteNameBase) + + baseURL, err := route.URL() + if err != nil { + return "", err + } + + return baseURL.String(), nil +} + +// BuildTagsURL constructs a url to list the tags in the named repository. +func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { + route := ub.cloneRoute(RouteNameTags) + + tagsURL, err := route.URL("name", name) + if err != nil { + return "", err + } + + return tagsURL.String(), nil +} + +// BuildManifestURL constructs a url for the manifest identified by name and tag. +func (ub *URLBuilder) BuildManifestURL(name, tag string) (string, error) { + route := ub.cloneRoute(RouteNameManifest) + + manifestURL, err := route.URL("name", name, "tag", tag) + if err != nil { + return "", err + } + + return manifestURL.String(), nil +} + +// BuildBlobURL constructs the url for the blob identified by name and dgst. +func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { + route := ub.cloneRoute(RouteNameBlob) + + layerURL, err := route.URL("name", name, "digest", dgst.String()) + if err != nil { + return "", err + } + + return layerURL.String(), nil +} + +// BuildBlobUploadURL constructs a url to begin a blob upload in the +// repository identified by name. +func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUpload) + + uploadURL, err := route.URL("name", name) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, +// including any url values. This should generally not be used by clients, as +// this url is provided by server implementations during the blob upload +// process. +func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUploadChunk) + + uploadURL, err := route.URL("name", name, "uuid", uuid) + if err != nil { + return "", err + } + + return appendValuesURL(uploadURL, values...).String(), nil +} + +// clondedRoute returns a clone of the named route from the router. Routes +// must be cloned to avoid modifying them during url generation. +func (ub *URLBuilder) cloneRoute(name string) clonedRoute { + route := new(mux.Route) + root := new(url.URL) + + *route = *ub.router.GetRoute(name) // clone the route + *root = *ub.root + + return clonedRoute{Route: route, root: root} +} + +type clonedRoute struct { + *mux.Route + root *url.URL +} + +func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { + routeURL, err := cr.Route.URL(pairs...) + if err != nil { + return nil, err + } + + return cr.root.ResolveReference(routeURL), nil +} + +// appendValuesURL appends the parameters to the url. 
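+//
+// For instance, merging illustrative upload parameters onto a location url:
+//
+// u, _ := url.Parse("http://localhost:5000/v2/foo/blobs/uploads/uuid")
+// v := url.Values{"digest": []string{"sha256:abc"}, "size": []string{"10"}}
+// appendValuesURL(u, v)
+// // http://localhost:5000/v2/foo/blobs/uploads/uuid?digest=sha256%3Aabc&size=10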
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { + merged := u.Query() + + for _, v := range values { + for k, vv := range v { + merged[k] = append(merged[k], vv...) + } + } + + u.RawQuery = merged.Encode() + return u +} + +// appendValues appends the parameters to the url. Panics if the string is not +// a url. +func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go new file mode 100644 index 00000000..d8001c2a --- /dev/null +++ b/docs/api/v2/urls_test.go @@ -0,0 +1,155 @@ +package v2 + +import ( + "net/http" + "net/url" + "testing" +) + +type urlBuilderTestCase struct { + description string + expectedPath string + build func() (string, error) +} + +func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + return []urlBuilderTestCase{ + { + description: "test base url", + expectedPath: "/v2/", + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expectedPath: "/v2/foo/bar/tags/list", + build: func() (string, error) { + return urlBuilder.BuildTagsURL("foo/bar") + }, + }, + { + description: "test manifest url", + expectedPath: "/v2/foo/bar/manifests/tag", + build: func() (string, error) { + return urlBuilder.BuildManifestURL("foo/bar", "tag") + }, + }, + { + description: "build blob url", + expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + build: func() (string, error) { + return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + }, + }, + { + description: "build blob upload url", + expectedPath: "/v2/foo/bar/blobs/uploads/", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar") + }, + }, + { + description: "build blob upload url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + } +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. 
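+//
+// Each case is exercised against several http and https roots, so the
+// expected url is always the given root joined with expectedPath.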
+func TestURLBuilder(t *testing.T) { + roots := []string{ + "http://example.com", + "https://example.com", + "http://localhost:5000", + "https://localhost:5443", + } + + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := root + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +type builderFromRequestTestCase struct { + request *http.Request + base string +} + +func TestBuilderFromRequest(t *testing.T) { + u, err := url.Parse("http://example.com") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com", + }, + } + + for _, tr := range testRequests { + builder := NewURLBuilderFromRequest(tr.request) + + for _, testCase := range makeURLBuilderTestCases(builder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := tr.base + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 1d1173a9..08d0cf97 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -14,7 +14,7 @@ import ( "reflect" "testing" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/configuration" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" diff --git a/docs/handlers/app.go b/docs/handlers/app.go index e49144e2..876c69b4 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -7,7 +7,7 @@ import ( "os" "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/auth" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 927f40a4..249c381b 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -7,7 +7,7 @@ import ( "net/url" "testing" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/auth/silly" "github.com/docker/distribution/configuration" "github.com/docker/distribution/storage" diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 8f277595..c940d8f4 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -4,7 +4,7 @@ import ( "fmt" "net/http" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/storage" diff --git a/docs/handlers/images.go b/docs/handlers/images.go index c26a2239..9c2dfa43 100644 --- 
a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index 31d24b86..73641dde 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -3,7 +3,7 @@ package handlers import ( "net/http" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/storage" diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index a15e274a..190cf86b 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -7,7 +7,7 @@ import ( "net/url" "os" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/storage" diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 65ffacfc..1e8e43d5 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net/http" - "github.com/docker/distribution/api/v2" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/storage" "github.com/gorilla/handlers" ) From c3b07952ad9a7d553a6374e68e2ea2997a381ea9 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 10 Feb 2015 17:34:04 -0800 Subject: [PATCH 020/501] Move auth package under registry package Signed-off-by: Stephen J Day --- docs/auth/auth.go | 142 ++++++++++ docs/auth/silly/access.go | 96 +++++++ docs/auth/silly/access_test.go | 70 +++++ docs/auth/token/accesscontroller.go | 274 ++++++++++++++++++++ docs/auth/token/stringset.go | 35 +++ docs/auth/token/token.go | 343 ++++++++++++++++++++++++ docs/auth/token/token_test.go | 386 ++++++++++++++++++++++++++++ docs/auth/token/util.go | 58 +++++ docs/handlers/app.go | 2 +- docs/handlers/app_test.go | 2 +- 10 files changed, 1406 insertions(+), 2 deletions(-) create mode 100644 docs/auth/auth.go create mode 100644 docs/auth/silly/access.go create mode 100644 docs/auth/silly/access_test.go create mode 100644 docs/auth/token/accesscontroller.go create mode 100644 docs/auth/token/stringset.go create mode 100644 docs/auth/token/token.go create mode 100644 docs/auth/token/token_test.go create mode 100644 docs/auth/token/util.go diff --git a/docs/auth/auth.go b/docs/auth/auth.go new file mode 100644 index 00000000..cd6ee096 --- /dev/null +++ b/docs/auth/auth.go @@ -0,0 +1,142 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the requset is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. 
+//
+// options := map[string]interface{}{"sillySecret": "whysosilly?"}
+// accessController, _ := auth.GetAccessController("silly", options)
+//
+// This `accessController` can then be used in a request handler like so:
+//
+// func updateOrder(w http.ResponseWriter, r *http.Request) {
+// orderNumber := r.FormValue("orderNumber")
+// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
+// access := auth.Access{Resource: resource, Action: "update"}
+//
+// if ctx, err := accessController.Authorized(ctx, access); err != nil {
+// if challenge, ok := err.(auth.Challenge) {
+// // Let the challenge write the response.
+// challenge.ServeHTTP(w, r)
+// } else {
+// // Some other error.
+// }
+// }
+// }
+//
+package auth
+
+import (
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// UserInfo carries information about
+// an authenticated/authorized client.
+type UserInfo struct {
+ Name string
+}
+
+// Resource describes a resource by type and name.
+type Resource struct {
+ Type string
+ Name string
+}
+
+// Access describes a specific action that is
+// requested or allowed for a given resource.
+type Access struct {
+ Resource
+ Action string
+}
+
+// Challenge is a special error type which is used for HTTP 401 Unauthorized
+// responses and is able to write the response with WWW-Authenticate challenge
+// header values based on the error.
+type Challenge interface {
+ error
+ // ServeHTTP prepares the request to conduct the appropriate challenge
+ // response. For most implementations, simply calling ServeHTTP should be
+ // sufficient. Because no body is written, users may write a custom body after
+ // calling ServeHTTP, but any headers must be written before the call and may
+ // be overwritten.
+ ServeHTTP(w http.ResponseWriter, r *http.Request)
+}
+
+// AccessController controls access to registry resources based on a request
+// and required access levels for a request. Implementations can support both
+// complete denial and http authorization challenges.
+type AccessController interface {
+ // Authorized returns a new authorized context when the given context is
+ // granted the requested access, and a non-nil error otherwise. If one or
+ // more Access structs are provided, the requested access will be compared
+ // with what is available to the context. The given context will contain a
+ // "http.request" key with a `*http.Request` value. If the error is
+ // non-nil, access should always be denied. The error may be of type
+ // Challenge, in which case the caller may have the Challenge handle the
+ // request or choose what action to take based on the Challenge header or
+ // response status. The returned context object should have an "auth.user"
+ // value set to a UserInfo struct.
+ Authorized(ctx context.Context, access ...Access) (context.Context, error)
+}
+
+// WithUser returns a context with the authorized user info.
+func WithUser(ctx context.Context, user UserInfo) context.Context {
+ return userInfoContext{
+ Context: ctx,
+ user: user,
+ }
+}
+
+type userInfoContext struct {
+ context.Context
+ user UserInfo
+}
+
+func (uic userInfoContext) Value(key interface{}) interface{} {
+ switch key {
+ case "auth.user":
+ return uic.user
+ case "auth.user.name":
+ return uic.user.Name
+ }
+
+ return uic.Context.Value(key)
+}
+
+// InitFunc is the type of an AccessController factory function and is used
+// to register the constructor for different AccessController backends.
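+//
+// A backend typically registers itself from an init function, for example
+// (with a hypothetical "example" backend):
+//
+// func init() {
+// auth.Register("example", auth.InitFunc(newExampleController))
+// }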
+type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. +func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go new file mode 100644 index 00000000..7d3a4d40 --- /dev/null +++ b/docs/auth/silly/access.go @@ -0,0 +1,96 @@ +// Package silly provides a simple authentication scheme that checks for the +// existence of an Authorization header and issues access if is present and +// non-empty. +// +// This package is present as an example implementation of a minimal +// auth.AccessController and for testing. This is not suitable for any kind of +// production security. +package silly + +import ( + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution/registry/auth" + ctxu "github.com/docker/distribution/context" + "golang.org/x/net/context" +) + +// accessController provides a simple implementation of auth.AccessController +// that simply checks for a non-empty Authorization header. It is useful for +// demonstration and testing. +type accessController struct { + realm string + service string +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for silly access controller`) + } + + service, present := options["service"] + if _, ok := service.(string); !present || !ok { + return nil, fmt.Errorf(`"service" must be set for silly access controller`) + } + + return &accessController{realm: realm.(string), service: service.(string)}, nil +} + +// Authorized simply checks for the existence of the authorization header, +// responding with a bearer challenge if it doesn't exist. 
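+//
+// On failure, the written challenge carries parameters of the form
+//
+// Bearer realm="<realm>",service="<service>",scope="<scope>"
+//
+// where scope is only included when access records were requested.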
+func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := ctxu.GetRequest(ctx) + if err != nil { + return nil, err + } + + if req.Header.Get("Authorization") == "" { + challenge := challenge{ + realm: ac.realm, + service: ac.service, + } + + if len(accessRecords) > 0 { + var scopes []string + for _, access := range accessRecords { + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) + } + challenge.scope = strings.Join(scopes, " ") + } + + return nil, &challenge + } + + return context.WithValue(ctx, "auth.user", auth.UserInfo{Name: "silly"}), nil +} + +type challenge struct { + realm string + service string + scope string +} + +func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { + header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) + + if ch.scope != "" { + header = fmt.Sprintf("%s,scope=%q", header, ch.scope) + } + + w.Header().Set("Authorization", header) + w.WriteHeader(http.StatusUnauthorized) +} + +func (ch *challenge) Error() string { + return fmt.Sprintf("silly authentication challenge: %#v", ch) +} + +// init registers the silly auth backend. +func init() { + auth.Register("silly", auth.InitFunc(newAccessController)) +} diff --git a/docs/auth/silly/access_test.go b/docs/auth/silly/access_test.go new file mode 100644 index 00000000..d579e878 --- /dev/null +++ b/docs/auth/silly/access_test.go @@ -0,0 +1,70 @@ +package silly + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +func TestSillyAccessController(t *testing.T) { + ac := &accessController{ + realm: "test-realm", + service: "test-service", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(nil, "http.request", r) + authCtx, err := ac.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.ServeHTTP(w, r) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("silly accessController did not set auth.user context") + } + + if userInfo.Name != "silly" { + t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Authorization", "seriously, anything") + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) + } +} diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go new file mode 100644 index 00000000..61b275a7 --- /dev/null +++ b/docs/auth/token/accesscontroller.go @@ -0,0 +1,274 @@ +package token + +import ( + "crypto" + 
"crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/docker/distribution/registry/auth" + ctxu "github.com/docker/distribution/context" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. +func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +// Error returns the internal error string for this authChallenge. +func (ac *authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac *authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac *authChallenge) challengeParams() string { + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetHeader sets the WWW-Authenticate value for the given header. +func (ac *authChallenge) SetHeader(header http.Header) { + header.Add("WWW-Authenticate", ac.challengeParams()) +} + +// ServeHttp handles writing the challenge response +// by setting the challenge header and status code. +func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ac.SetHeader(w.Header()) + w.WriteHeader(ac.Status()) +} + +// accessController implements the auth.AccessController interface. 
+type accessController struct { + realm string + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the contstructor of an accessController. +type tokenAccessOptions struct { + realm string + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. +func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootCertBundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options. +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. 
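+//
+// The flow: extract the Bearer token from the Authorization header, parse
+// and verify it against the configured issuer, audience, root certificates
+// and trusted keys, then confirm that the token's access claims cover every
+// requested access item. Any failure is returned as an authChallenge.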
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := ctxu.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/docs/auth/token/stringset.go b/docs/auth/token/stringset.go new file mode 100644 index 00000000..1d04f104 --- /dev/null +++ b/docs/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) + return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/docs/auth/token/token.go b/docs/auth/token/token.go new file mode 100644 index 00000000..166816ee --- /dev/null +++ b/docs/auth/token/token.go @@ -0,0 +1,343 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." +) + +// Errors used by token parsing and verification. +var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. 
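+//
+// A decoded claim set might look like this (illustrative values):
+//
+// {
+// "iss": "auth.example.com",
+// "sub": "user",
+// "aud": "registry.example.com",
+// "exp": 1423778000,
+// "nbf": 1423777700,
+// "iat": 1423777700,
+// "jti": "abc123",
+// "access": [{"type": "repository", "name": "foo/bar", "actions": ["pull"]}]
+// }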
+type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. +func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Errorf("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Errorf("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. + currentUnixTime := time.Now().Unix() + if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { + log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) + return ErrInvalidToken + } + + // Verify the token signature. + if len(t.Signature) == 0 { + log.Error("token has no signature") + return ErrInvalidToken + } + + // Verify that the signing key is trusted. 
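+ // The key may arrive as an x5c certificate chain, an embedded JWK, or a
+ // key ID naming one of the trusted keys; VerifySigningKey (below) tries
+ // these in that order.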
+ signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Error(err) + return ErrInvalidToken + } + + // Finally, verify the signature of the token using the key which signed it. + if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { + log.Errorf("unable to verify token signature: %s", err) + return ErrInvalidToken + } + + return nil +} + +// VerifySigningKey attempts to get the key which was used to sign this token. +// The token header should contain either of these 3 fields: +// `x5c` - The x509 certificate chain for the signing key. Needs to be +// verified. +// `jwk` - The JSON Web Key representation of the signing key. +// May contain its own `x5c` field which needs to be verified. +// `kid` - The unique identifier for the key. This library interprets it +// as a libtrust fingerprint. The key itself can be looked up in +// the trustedKeys field of the given verify options. +// Each of these methods are tried in that order of preference until the +// signing key is found or an error is returned. +func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { + // First attempt to get an x509 certificate chain from the header. + var ( + x5c = t.Header.X5c + rawJWK = t.Header.RawJWK + keyID = t.Header.KeyID + ) + + switch { + case len(x5c) > 0: + signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) + case len(rawJWK) > 0: + signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) + case len(keyID) > 0: + signingKey = verifyOpts.TrustedKeys[keyID] + if signingKey == nil { + err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) + } + default: + err = errors.New("unable to get token signing key") + } + + return +} + +func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { + if len(x5c) == 0 { + return nil, errors.New("empty x509 certificate chain") + } + + // Ensure the first element is encoded correctly. + leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) + if err != nil { + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) + } + + // And that it is a valid x509 certificate. + leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) + } + + // The rest of the certificate chain are intermediate certificates. + intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. 
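+ // Chain verification succeeded above, so this key is anchored in one of
+ // the provided roots and may be used to check the token signature.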
+ leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. + x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. +func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go new file mode 100644 index 00000000..791eb214 --- /dev/null +++ b/docs/auth/token/token_test.go @@ -0,0 +1,386 @@ +package token + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { + keys := make([]libtrust.PrivateKey, 0, numKeys) + + for i := 0; i < numKeys; i++ { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + keys = append(keys, key) + } + + return keys, nil +} + +func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { + if depth == 0 { + // Don't need to build a chain. 
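+ // A depth of zero means the token is signed directly by the trusted root.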
+ return rootKey, nil + } + + var ( + x5c = make([]string, depth) + parentKey = rootKey + key libtrust.PrivateKey + cert *x509.Certificate + err error + ) + + for depth > 0 { + if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { + return nil, err + } + + if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { + return nil, err + } + + depth-- + x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) + parentKey = key + } + + key.AddExtendedField("x5c", x5c) + + return key, nil +} + +func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { + certs := make([]*x509.Certificate, 0, len(rootKeys)) + + for _, key := range rootKeys { + cert, err := libtrust.GenerateCACert(key, key) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + + return certs, nil +} + +func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { + trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) + + for _, key := range rootKeys { + trustedKeys[key.KeyID()] = key.PublicKey() + } + + return trustedKeys +} + +func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { + signingKey, err := makeSigningKeyWithChain(rootKey, depth) + if err != nil { + return nil, fmt.Errorf("unable to amke signing key with chain: %s", err) + } + + rawJWK, err := signingKey.PublicKey().MarshalJSON() + if err != nil { + return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) + } + + joseHeader := &Header{ + Type: "JWT", + SigningAlg: "ES256", + RawJWK: json.RawMessage(rawJWK), + } + + now := time.Now() + + randomBytes := make([]byte, 15) + if _, err = rand.Read(randomBytes); err != nil { + return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) + } + + claimSet := &ClaimSet{ + Issuer: issuer, + Subject: "foo", + Audience: audience, + Expiration: now.Add(5 * time.Minute).Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: base64.URLEncoding.EncodeToString(randomBytes), + Access: access, + } + + var joseHeaderBytes, claimSetBytes []byte + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return nil, fmt.Errorf("unable to marshal jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return nil, fmt.Errorf("unable to marshal claim set: %s", err) + } + + encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) + encodedClaimSet := joseBase64UrlEncode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { + return nil, fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64UrlEncode(signatureBytes) + tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) + + return NewToken(tokenString) +} + +// This test makes 4 tokens with a varying number of intermediate +// certificates ranging from no intermediate chain to a length of 3 +// intermediates. 
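+//
+// Token i is signed through a certificate chain of depth i rooted at
+// rootKeys[i], so every token should verify against the root pool.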
+func TestTokenVerify(t *testing.T) { + var ( + numTokens = 4 + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(numTokens) + if err != nil { + t.Fatal(err) + } + + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + + rootPool := x509.NewCertPool() + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + tokens := make([]*Token, 0, numTokens) + + for i := 0; i < numTokens; i++ { + token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) + if err != nil { + t.Fatal(err) + } + tokens = append(tokens, token) + } + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: rootPool, + TrustedKeys: trustedKeys, + } + + for _, token := range tokens { + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + } +} + +func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + return "", err + } + + tempFile, err := ioutil.TempFile("", "rootCertBundle") + if err != nil { + return "", err + } + defer tempFile.Close() + + for _, cert := range rootCerts { + if err = pem.Encode(tempFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }); err != nil { + os.Remove(tempFile.Name()) + return "", err + } + } + + return tempFile.Name(), nil +} + +// TestAccessController tests complete integration of the token auth package. +// It starts by mocking the options for a token auth accessController which +// it creates. It then tries a few mock requests: +// - don't supply a token; should error with challenge +// - supply an invalid token; should error with challenge +// - supply a token with insufficient access; should error with challenge +// - supply a valid token; should not error +func TestAccessController(t *testing.T) { + // Make 2 keys; only the first is to be a trusted root key. + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootCertBundle": rootCertBundleFilename, + } + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + // 1. Make a mock http.Request with no token. + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + + testAccess := auth.Access{ + Resource: auth.Resource{ + Type: "foo", + Name: "bar", + }, + Action: "baz", + } + + ctx := context.WithValue(nil, "http.request", req) + authCtx, err := accessController.Authorized(ctx, testAccess) + challenge, ok := err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrTokenRequired.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 2. Supply an invalid token. 
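+ // The token below is signed by rootKeys[1], which was deliberately left
+ // out of the root certificate bundle, so verification must fail.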
+ token, err := makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[1], 1, // Everything is valid except the key which signed it. + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInvalidToken.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 3. Supply a token with insufficient access. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{}, // No access specified. + rootKeys[0], 1, + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInsufficientScope.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 4. Supply the token we need, or deserve, or whatever. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[0], 1, + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("token accessController did not set auth.user context") + } + + if userInfo.Name != "foo" { + t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) + } +} diff --git a/docs/auth/token/util.go b/docs/auth/token/util.go new file mode 100644 index 00000000..bf3e01e8 --- /dev/null +++ b/docs/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters ommitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. 
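+//
+// Its contains method treats "*" as a wildcard, so a token granted the
+// action "*" on a resource satisfies any requested action on it.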
+type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 876c69b4..5f433e95 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -8,7 +8,7 @@ import ( "code.google.com/p/go-uuid/uuid" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/auth" + "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/storage" diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 249c381b..8da285a3 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/docker/distribution/registry/api/v2" - _ "github.com/docker/distribution/auth/silly" + _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/configuration" "github.com/docker/distribution/storage" "github.com/docker/distribution/storagedriver/inmemory" From d6308bc62b22b50ff968f5284cf6720e8f584290 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 10 Feb 2015 17:35:30 -0800 Subject: [PATCH 021/501] Move client package under registry package Signed-off-by: Stephen J Day --- docs/client/client.go | 574 +++++++++++++++++++++++++++++++++++++ docs/client/client_test.go | 440 ++++++++++++++++++++++++++++ docs/client/errors.go | 79 +++++ docs/client/objectstore.go | 239 +++++++++++++++ docs/client/pull.go | 151 ++++++++++ docs/client/push.go | 137 +++++++++ 6 files changed, 1620 insertions(+) create mode 100644 docs/client/client.go create mode 100644 docs/client/client_test.go create mode 100644 docs/client/errors.go create mode 100644 docs/client/objectstore.go create mode 100644 docs/client/pull.go create mode 100644 docs/client/push.go diff --git a/docs/client/client.go b/docs/client/client.go new file mode 100644 index 00000000..c697e01c --- /dev/null +++ b/docs/client/client.go @@ -0,0 +1,574 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// Client implements the client interface to the registry http api +type Client interface { + // GetImageManifest returns an image manifest for the image at the given + // name, tag pair. + GetImageManifest(name, tag string) (*manifest.SignedManifest, error) + + // PutImageManifest uploads an image manifest for the image at the given + // name, tag pair. + PutImageManifest(name, tag string, imageManifest *manifest.SignedManifest) error + + // DeleteImage removes the image at the given name, tag pair. + DeleteImage(name, tag string) error + + // ListImageTags returns a list of all image tags with the given repository + // name. + ListImageTags(name string) ([]string, error) + + // BlobLength returns the length of the blob stored at the given name, + // digest pair. 
+ // Returns a length value of -1 on error or if the blob does not exist.
+ BlobLength(name string, dgst digest.Digest) (int, error)
+
+ // GetBlob returns the blob stored at the given name, digest pair in the
+ // form of an io.ReadCloser with the length of this blob.
+ // A nonzero byteOffset can be provided to receive a partial blob beginning
+ // at the given offset.
+ GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error)
+
+ // InitiateBlobUpload starts a blob upload in the given repository namespace
+ // and returns a unique location url to use for other blob upload methods.
+ InitiateBlobUpload(name string) (string, error)
+
+ // GetBlobUploadStatus returns the byte offset and length of the blob at the
+ // given upload location.
+ GetBlobUploadStatus(location string) (int, int, error)
+
+ // UploadBlob uploads a full blob to the registry.
+ UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error
+
+ // UploadBlobChunk uploads a blob chunk with a given length and startByte to
+ // the registry.
+ // FinishChunkedBlobUpload must be called to finalize this upload.
+ UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error
+
+ // FinishChunkedBlobUpload completes a chunked blob upload at a given
+ // location.
+ FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error
+
+ // CancelBlobUpload deletes all content at the unfinished blob upload
+ // location and invalidates any future calls to this blob upload.
+ CancelBlobUpload(location string) error
+}
+
+var (
+ patternRangeHeader = regexp.MustCompile("bytes=0-(\\d+)/(\\d+)")
+)
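+
+// A hypothetical usage sketch of the Client interface above (the endpoint
+// and repository name are illustrative, not defaults of this package):
+//
+//    c, err := New("http://localhost:5000")
+//    if err != nil {
+//        // handle error
+//    }
+//    tags, err := c.ListImageTags("hello/world")
+
+// New returns a new Client which operates against a registry with the
+// given base endpoint.
+// This endpoint should not include /v2/ or any part of the url after this.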
+func New(endpoint string) (Client, error) { + ub, err := v2.NewURLBuilderFromString(endpoint) + if err != nil { + return nil, err + } + + return &clientImpl{ + endpoint: endpoint, + ub: ub, + }, nil +} + +// clientImpl is the default implementation of the Client interface +type clientImpl struct { + endpoint string + ub *v2.URLBuilder +} + +// TODO(bbland): use consistent route generation between server and client + +func (r *clientImpl) GetImageManifest(name, tag string) (*manifest.SignedManifest, error) { + manifestURL, err := r.ub.BuildManifestURL(name, tag) + if err != nil { + return nil, err + } + + response, err := http.Get(manifestURL) + if err != nil { + return nil, err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusOK: + break + case response.StatusCode == http.StatusNotFound: + return nil, &ImageManifestNotFoundError{Name: name, Tag: tag} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return nil, err + } + return nil, &errs + default: + return nil, &UnexpectedHTTPStatusError{Status: response.Status} + } + + decoder := json.NewDecoder(response.Body) + + manifest := new(manifest.SignedManifest) + err = decoder.Decode(manifest) + if err != nil { + return nil, err + } + return manifest, nil +} + +func (r *clientImpl) PutImageManifest(name, tag string, manifest *manifest.SignedManifest) error { + manifestURL, err := r.ub.BuildManifestURL(name, tag) + if err != nil { + return err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(manifest.Raw)) + if err != nil { + return err + } + + response, err := http.DefaultClient.Do(putRequest) + if err != nil { + return err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted: + return nil + case response.StatusCode >= 400 && response.StatusCode < 500: + var errors v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errors) + if err != nil { + return err + } + + return &errors + default: + return &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) DeleteImage(name, tag string) error { + manifestURL, err := r.ub.BuildManifestURL(name, tag) + if err != nil { + return err + } + + deleteRequest, err := http.NewRequest("DELETE", manifestURL, nil) + if err != nil { + return err + } + + response, err := http.DefaultClient.Do(deleteRequest) + if err != nil { + return err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusNoContent: + break + case response.StatusCode == http.StatusNotFound: + return &ImageManifestNotFoundError{Name: name, Tag: tag} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return err + } + return &errs + default: + return &UnexpectedHTTPStatusError{Status: response.Status} + } + + return nil +} + +func (r *clientImpl) ListImageTags(name string) ([]string, error) { + tagsURL, err := r.ub.BuildTagsURL(name) + if err != nil { + return nil, err + } + + response, err := http.Get(tagsURL) + if err != nil { + return nil, err + } + defer 
response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusOK: + break + case response.StatusCode == http.StatusNotFound: + return nil, &RepositoryNotFoundError{Name: name} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return nil, err + } + return nil, &errs + default: + return nil, &UnexpectedHTTPStatusError{Status: response.Status} + } + + tags := struct { + Tags []string `json:"tags"` + }{} + + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&tags) + if err != nil { + return nil, err + } + + return tags.Tags, nil +} + +func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { + blobURL, err := r.ub.BuildBlobURL(name, dgst) + if err != nil { + return -1, err + } + + response, err := http.Head(blobURL) + if err != nil { + return -1, err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusOK: + lengthHeader := response.Header.Get("Content-Length") + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return -1, err + } + return int(length), nil + case response.StatusCode == http.StatusNotFound: + return -1, nil + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return -1, err + } + return -1, &errs + default: + response.Body.Close() + return -1, &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { + blobURL, err := r.ub.BuildBlobURL(name, dgst) + if err != nil { + return nil, 0, err + } + + getRequest, err := http.NewRequest("GET", blobURL, nil) + if err != nil { + return nil, 0, err + } + + getRequest.Header.Add("Range", fmt.Sprintf("%d-", byteOffset)) + response, err := http.DefaultClient.Do(getRequest) + if err != nil { + return nil, 0, err + } + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusOK: + lengthHeader := response.Header.Get("Content-Length") + length, err := strconv.ParseInt(lengthHeader, 10, 0) + if err != nil { + return nil, 0, err + } + return response.Body, int(length), nil + case response.StatusCode == http.StatusNotFound: + response.Body.Close() + return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return nil, 0, err + } + return nil, 0, &errs + default: + response.Body.Close() + return nil, 0, &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { + uploadURL, err := r.ub.BuildBlobUploadURL(name) + if err != nil { + return "", err + } + + postRequest, err := http.NewRequest("POST", uploadURL, nil) + if err != nil { + return "", err + } + + response, err := http.DefaultClient.Do(postRequest) + if err != nil { + return "", err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusAccepted: + return response.Header.Get("Location"), nil + // case 
response.StatusCode == http.StatusNotFound: + // return + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return "", err + } + return "", &errs + default: + return "", &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { + response, err := http.Get(location) + if err != nil { + return 0, 0, err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusNoContent: + return parseRangeHeader(response.Header.Get("Range")) + case response.StatusCode == http.StatusNotFound: + return 0, 0, &BlobUploadNotFoundError{Location: location} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return 0, 0, err + } + return 0, 0, &errs + default: + return 0, 0, &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error { + defer blob.Close() + + putRequest, err := http.NewRequest("PUT", location, blob) + if err != nil { + return err + } + + values := putRequest.URL.Query() + values.Set("digest", dgst.String()) + putRequest.URL.RawQuery = values.Encode() + + putRequest.Header.Set("Content-Type", "application/octet-stream") + putRequest.Header.Set("Content-Length", fmt.Sprint(length)) + + response, err := http.DefaultClient.Do(putRequest) + if err != nil { + return err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusCreated: + return nil + case response.StatusCode == http.StatusNotFound: + return &BlobUploadNotFoundError{Location: location} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return err + } + return &errs + default: + return &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error { + defer blobChunk.Close() + + putRequest, err := http.NewRequest("PUT", location, blobChunk) + if err != nil { + return err + } + + endByte := startByte + length + + putRequest.Header.Set("Content-Type", "application/octet-stream") + putRequest.Header.Set("Content-Length", fmt.Sprint(length)) + putRequest.Header.Set("Content-Range", + fmt.Sprintf("%d-%d/%d", startByte, endByte, endByte)) + + response, err := http.DefaultClient.Do(putRequest) + if err != nil { + return err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusAccepted: + return nil + case response.StatusCode == http.StatusRequestedRangeNotSatisfiable: + lastValidRange, blobSize, err := parseRangeHeader(response.Header.Get("Range")) + if err != nil { + return err + } + return &BlobUploadInvalidRangeError{ + Location: location, + LastValidRange: lastValidRange, + BlobSize: blobSize, + } + case response.StatusCode == http.StatusNotFound: + return &BlobUploadNotFoundError{Location: location} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := 
json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return err + } + return &errs + default: + return &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error { + putRequest, err := http.NewRequest("PUT", location, nil) + if err != nil { + return err + } + + values := putRequest.URL.Query() + values.Set("digest", dgst.String()) + putRequest.URL.RawQuery = values.Encode() + + putRequest.Header.Set("Content-Type", "application/octet-stream") + putRequest.Header.Set("Content-Length", "0") + putRequest.Header.Set("Content-Range", + fmt.Sprintf("%d-%d/%d", length, length, length)) + + response, err := http.DefaultClient.Do(putRequest) + if err != nil { + return err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusCreated: + return nil + case response.StatusCode == http.StatusNotFound: + return &BlobUploadNotFoundError{Location: location} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return err + } + return &errs + default: + return &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) CancelBlobUpload(location string) error { + deleteRequest, err := http.NewRequest("DELETE", location, nil) + if err != nil { + return err + } + + response, err := http.DefaultClient.Do(deleteRequest) + if err != nil { + return err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusNoContent: + return nil + case response.StatusCode == http.StatusNotFound: + return &BlobUploadNotFoundError{Location: location} + case response.StatusCode >= 400 && response.StatusCode < 500: + var errs v2.Errors + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errs) + if err != nil { + return err + } + return &errs + default: + return &UnexpectedHTTPStatusError{Status: response.Status} + } +} + +// parseRangeHeader parses out the offset and length from a returned Range +// header +func parseRangeHeader(byteRangeHeader string) (int, int, error) { + submatches := patternRangeHeader.FindStringSubmatch(byteRangeHeader) + if submatches == nil || len(submatches) < 3 { + return 0, 0, fmt.Errorf("Malformed Range header") + } + + offset, err := strconv.Atoi(submatches[1]) + if err != nil { + return 0, 0, err + } + length, err := strconv.Atoi(submatches[2]) + if err != nil { + return 0, 0, err + } + return offset, length, nil +} diff --git a/docs/client/client_test.go b/docs/client/client_test.go new file mode 100644 index 00000000..2c1d1cc2 --- /dev/null +++ b/docs/client/client_test.go @@ -0,0 +1,440 @@ +package client + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "sync" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/testutil" +) + +type testBlob struct { + digest digest.Digest + contents []byte +} + +func TestRangeHeaderParser(t *testing.T) { + const ( + malformedRangeHeader = "bytes=0-A/C" + emptyRangeHeader = "" + rFirst = 100 + rSecond = 200 + ) + + var ( + wellformedRangeHeader = fmt.Sprintf("bytes=0-%d/%d", rFirst, rSecond) + ) + + if _, _, err := parseRangeHeader(malformedRangeHeader); err == nil { + 
t.Fatalf("malformedRangeHeader: error expected, got nil") + } + + if _, _, err := parseRangeHeader(emptyRangeHeader); err == nil { + t.Fatalf("emptyRangeHeader: error expected, got nil") + } + + first, second, err := parseRangeHeader(wellformedRangeHeader) + if err != nil { + t.Fatalf("wellformedRangeHeader: unexpected error %v", err) + } + + if first != rFirst || second != rSecond { + t.Fatalf("Range has been parsed unproperly: %d/%d", first, second) + } + +} + +func TestPush(t *testing.T) { + name := "hello/world" + tag := "sometag" + testBlobs := []testBlob{ + { + digest: "tarsum.v2+sha256:12345", + contents: []byte("some contents"), + }, + { + digest: "tarsum.v2+sha256:98765", + contents: []byte("some other contents"), + }, + } + uploadLocations := make([]string, len(testBlobs)) + blobs := make([]manifest.FSLayer, len(testBlobs)) + history := make([]manifest.History, len(testBlobs)) + + for i, blob := range testBlobs { + // TODO(bbland): this is returning the same location for all uploads, + // because we can't know which blob will get which location. + // It's sort of okay because we're using unique digests, but this needs + // to change at some point. + uploadLocations[i] = fmt.Sprintf("/v2/%s/blobs/test-uuid", name) + blobs[i] = manifest.FSLayer{BlobSum: blob.digest} + history[i] = manifest.History{V1Compatibility: blob.digest.String()} + } + + m := &manifest.SignedManifest{ + Manifest: manifest.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + }, + } + var err error + m.Raw, err = json.Marshal(m) + + blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) + for i, blob := range testBlobs { + blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + name + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {uploadLocations[i]}, + }), + }, + } + blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: uploadLocations[i], + QueryParams: map[string][]string{ + "digest": {blob.digest.String()}, + }, + Body: blob.contents, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + }, + } + } + + handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + name + "/manifests/" + tag, + Body: m.Raw, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + }, + })) + var server *httptest.Server + + // HACK(stevvooe): Super hack to follow: the request response map approach + // above does not let us correctly format the location header to the + // server url. This handler intercepts and re-writes the location header + // to the server url. 
+ + hack := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w = &headerInterceptingResponseWriter{ResponseWriter: w, serverURL: server.URL} + handler.ServeHTTP(w, r) + }) + + server = httptest.NewServer(hack) + client, err := New(server.URL) + if err != nil { + t.Fatalf("error creating client: %v", err) + } + objectStore := &memoryObjectStore{ + mutex: new(sync.Mutex), + manifestStorage: make(map[string]*manifest.SignedManifest), + layerStorage: make(map[digest.Digest]Layer), + } + + for _, blob := range testBlobs { + l, err := objectStore.Layer(blob.digest) + if err != nil { + t.Fatal(err) + } + + writer, err := l.Writer() + if err != nil { + t.Fatal(err) + } + + writer.SetSize(len(blob.contents)) + writer.Write(blob.contents) + writer.Close() + } + + objectStore.WriteManifest(name, tag, m) + + err = Push(client, objectStore, name, tag) + if err != nil { + t.Fatal(err) + } +} + +func TestPull(t *testing.T) { + name := "hello/world" + tag := "sometag" + testBlobs := []testBlob{ + { + digest: "tarsum.v2+sha256:12345", + contents: []byte("some contents"), + }, + { + digest: "tarsum.v2+sha256:98765", + contents: []byte("some other contents"), + }, + } + blobs := make([]manifest.FSLayer, len(testBlobs)) + history := make([]manifest.History, len(testBlobs)) + + for i, blob := range testBlobs { + blobs[i] = manifest.FSLayer{BlobSum: blob.digest} + history[i] = manifest.History{V1Compatibility: blob.digest.String()} + } + + m := &manifest.SignedManifest{ + Manifest: manifest.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + }, + } + manifestBytes, err := json.Marshal(m) + + blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs)) + for i, blob := range testBlobs { + blobRequestResponseMappings[i] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/blobs/" + blob.digest.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: blob.contents, + }, + } + } + + handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/manifests/" + tag, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: manifestBytes, + }, + })) + server := httptest.NewServer(handler) + client, err := New(server.URL) + if err != nil { + t.Fatalf("error creating client: %v", err) + } + objectStore := &memoryObjectStore{ + mutex: new(sync.Mutex), + manifestStorage: make(map[string]*manifest.SignedManifest), + layerStorage: make(map[digest.Digest]Layer), + } + + err = Pull(client, objectStore, name, tag) + if err != nil { + t.Fatal(err) + } + + m, err = objectStore.Manifest(name, tag) + if err != nil { + t.Fatal(err) + } + + mBytes, err := json.Marshal(m) + if err != nil { + t.Fatal(err) + } + + if string(mBytes) != string(manifestBytes) { + t.Fatal("Incorrect manifest") + } + + for _, blob := range testBlobs { + l, err := objectStore.Layer(blob.digest) + if err != nil { + t.Fatal(err) + } + + reader, err := l.Reader() + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + blobBytes, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + if string(blobBytes) != string(blob.contents) { + t.Fatal("Incorrect blob") + } + } +} + +func TestPullResume(t *testing.T) { + name := "hello/world" + tag := "sometag" + testBlobs := []testBlob{ + { + digest: 
"tarsum.v2+sha256:12345", + contents: []byte("some contents"), + }, + { + digest: "tarsum.v2+sha256:98765", + contents: []byte("some other contents"), + }, + } + layers := make([]manifest.FSLayer, len(testBlobs)) + history := make([]manifest.History, len(testBlobs)) + + for i, layer := range testBlobs { + layers[i] = manifest.FSLayer{BlobSum: layer.digest} + history[i] = manifest.History{V1Compatibility: layer.digest.String()} + } + + m := &manifest.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: layers, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + } + manifestBytes, err := json.Marshal(m) + + layerRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) + for i, blob := range testBlobs { + layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/blobs/" + blob.digest.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: blob.contents[:len(blob.contents)/2], + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(blob.contents))}, + }), + }, + } + layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/blobs/" + blob.digest.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: blob.contents[len(blob.contents)/2:], + }, + } + } + + for i := 0; i < 3; i++ { + layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/manifests/" + tag, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: manifestBytes, + }, + }) + } + + handler := testutil.NewHandler(layerRequestResponseMappings) + server := httptest.NewServer(handler) + client, err := New(server.URL) + if err != nil { + t.Fatalf("error creating client: %v", err) + } + objectStore := &memoryObjectStore{ + mutex: new(sync.Mutex), + manifestStorage: make(map[string]*manifest.SignedManifest), + layerStorage: make(map[digest.Digest]Layer), + } + + for attempts := 0; attempts < 3; attempts++ { + err = Pull(client, objectStore, name, tag) + if err == nil { + break + } + } + + if err != nil { + t.Fatal(err) + } + + sm, err := objectStore.Manifest(name, tag) + if err != nil { + t.Fatal(err) + } + + mBytes, err := json.Marshal(sm) + if err != nil { + t.Fatal(err) + } + + if string(mBytes) != string(manifestBytes) { + t.Fatal("Incorrect manifest") + } + + for _, blob := range testBlobs { + l, err := objectStore.Layer(blob.digest) + if err != nil { + t.Fatal(err) + } + + reader, err := l.Reader() + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + layerBytes, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + if string(layerBytes) != string(blob.contents) { + t.Fatal("Incorrect blob") + } + } +} + +// headerInterceptingResponseWriter is a hacky workaround to re-write the +// location header to have the server url. 
+type headerInterceptingResponseWriter struct { + http.ResponseWriter + serverURL string +} + +func (hirw *headerInterceptingResponseWriter) WriteHeader(status int) { + location := hirw.Header().Get("Location") + if location != "" { + hirw.Header().Set("Location", hirw.serverURL+location) + } + + hirw.ResponseWriter.WriteHeader(status) +} diff --git a/docs/client/errors.go b/docs/client/errors.go new file mode 100644 index 00000000..3e89e674 --- /dev/null +++ b/docs/client/errors.go @@ -0,0 +1,79 @@ +package client + +import ( + "fmt" + + "github.com/docker/distribution/digest" +) + +// RepositoryNotFoundError is returned when making an operation against a +// repository that does not exist in the registry. +type RepositoryNotFoundError struct { + Name string +} + +func (e *RepositoryNotFoundError) Error() string { + return fmt.Sprintf("No repository found with Name: %s", e.Name) +} + +// ImageManifestNotFoundError is returned when making an operation against a +// given image manifest that does not exist in the registry. +type ImageManifestNotFoundError struct { + Name string + Tag string +} + +func (e *ImageManifestNotFoundError) Error() string { + return fmt.Sprintf("No manifest found with Name: %s, Tag: %s", + e.Name, e.Tag) +} + +// BlobNotFoundError is returned when making an operation against a given image +// layer that does not exist in the registry. +type BlobNotFoundError struct { + Name string + Digest digest.Digest +} + +func (e *BlobNotFoundError) Error() string { + return fmt.Sprintf("No blob found with Name: %s, Digest: %s", + e.Name, e.Digest) +} + +// BlobUploadNotFoundError is returned when making a blob upload operation against an +// invalid blob upload location url. +// This may be the result of using a cancelled, completed, or stale upload +// location. +type BlobUploadNotFoundError struct { + Location string +} + +func (e *BlobUploadNotFoundError) Error() string { + return fmt.Sprintf("No blob upload found at Location: %s", e.Location) +} + +// BlobUploadInvalidRangeError is returned when attempting to upload an image +// blob chunk that is out of order. +// This provides the known BlobSize and LastValidRange which can be used to +// resume the upload. +type BlobUploadInvalidRangeError struct { + Location string + LastValidRange int + BlobSize int +} + +func (e *BlobUploadInvalidRangeError) Error() string { + return fmt.Sprintf( + "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", + e.Location, e.LastValidRange, e.BlobSize) +} + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) +} diff --git a/docs/client/objectstore.go b/docs/client/objectstore.go new file mode 100644 index 00000000..5969c9d2 --- /dev/null +++ b/docs/client/objectstore.go @@ -0,0 +1,239 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +var ( + // ErrLayerAlreadyExists is returned when attempting to create a layer with + // a tarsum that is already in use. + ErrLayerAlreadyExists = fmt.Errorf("Layer already exists") + + // ErrLayerLocked is returned when attempting to write to a layer which is + // currently being written to. 
+ ErrLayerLocked = fmt.Errorf("Layer locked") +) + +// ObjectStore is an interface which is designed to approximate the docker +// engine storage. This interface is subject to change to conform to the +// future requirements of the engine. +type ObjectStore interface { + // Manifest retrieves the image manifest stored at the given repository name + // and tag + Manifest(name, tag string) (*manifest.SignedManifest, error) + + // WriteManifest stores an image manifest at the given repository name and + // tag + WriteManifest(name, tag string, manifest *manifest.SignedManifest) error + + // Layer returns a handle to a layer for reading and writing + Layer(dgst digest.Digest) (Layer, error) +} + +// Layer is a generic image layer interface. +// A Layer may not be written to if it is already complete. +type Layer interface { + // Reader returns a LayerReader or an error if the layer has not been + // written to or is currently being written to. + Reader() (LayerReader, error) + + // Writer returns a LayerWriter or an error if the layer has been fully + // written to or is currently being written to. + Writer() (LayerWriter, error) + + // Wait blocks until the Layer can be read from. + Wait() error +} + +// LayerReader is a read-only handle to a Layer, which exposes the CurrentSize +// and full Size in addition to implementing the io.ReadCloser interface. +type LayerReader interface { + io.ReadCloser + + // CurrentSize returns the number of bytes written to the underlying Layer + CurrentSize() int + + // Size returns the full size of the underlying Layer + Size() int +} + +// LayerWriter is a write-only handle to a Layer, which exposes the CurrentSize +// and full Size in addition to implementing the io.WriteCloser interface. +// SetSize must be called on this LayerWriter before it can be written to. +type LayerWriter interface { + io.WriteCloser + + // CurrentSize returns the number of bytes written to the underlying Layer + CurrentSize() int + + // Size returns the full size of the underlying Layer + Size() int + + // SetSize sets the full size of the underlying Layer. 
+ // This must be called before any calls to Write + SetSize(int) error +} + +// memoryObjectStore is an in-memory implementation of the ObjectStore interface +type memoryObjectStore struct { + mutex *sync.Mutex + manifestStorage map[string]*manifest.SignedManifest + layerStorage map[digest.Digest]Layer +} + +func (objStore *memoryObjectStore) Manifest(name, tag string) (*manifest.SignedManifest, error) { + objStore.mutex.Lock() + defer objStore.mutex.Unlock() + + manifest, ok := objStore.manifestStorage[name+":"+tag] + if !ok { + return nil, fmt.Errorf("No manifest found with Name: %q, Tag: %q", name, tag) + } + return manifest, nil +} + +func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *manifest.SignedManifest) error { + objStore.mutex.Lock() + defer objStore.mutex.Unlock() + + objStore.manifestStorage[name+":"+tag] = manifest + return nil +} + +func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) { + objStore.mutex.Lock() + defer objStore.mutex.Unlock() + + layer, ok := objStore.layerStorage[dgst] + if !ok { + layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))} + objStore.layerStorage[dgst] = layer + } + + return layer, nil +} + +type memoryLayer struct { + cond *sync.Cond + contents []byte + expectedSize int + writing bool +} + +func (ml *memoryLayer) Reader() (LayerReader, error) { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.contents == nil { + return nil, fmt.Errorf("Layer has not been written to yet") + } + if ml.writing { + return nil, ErrLayerLocked + } + + return &memoryLayerReader{ml: ml, reader: bytes.NewReader(ml.contents)}, nil +} + +func (ml *memoryLayer) Writer() (LayerWriter, error) { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.contents != nil { + if ml.writing { + return nil, ErrLayerLocked + } + if ml.expectedSize == len(ml.contents) { + return nil, ErrLayerAlreadyExists + } + } else { + ml.contents = make([]byte, 0) + } + + ml.writing = true + return &memoryLayerWriter{ml: ml, buffer: bytes.NewBuffer(ml.contents)}, nil +} + +func (ml *memoryLayer) Wait() error { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.contents == nil { + return fmt.Errorf("No writer to wait on") + } + + for ml.writing { + ml.cond.Wait() + } + + return nil +} + +type memoryLayerReader struct { + ml *memoryLayer + reader *bytes.Reader +} + +func (mlr *memoryLayerReader) Read(p []byte) (int, error) { + return mlr.reader.Read(p) +} + +func (mlr *memoryLayerReader) Close() error { + return nil +} + +func (mlr *memoryLayerReader) CurrentSize() int { + return len(mlr.ml.contents) +} + +func (mlr *memoryLayerReader) Size() int { + return mlr.ml.expectedSize +} + +type memoryLayerWriter struct { + ml *memoryLayer + buffer *bytes.Buffer +} + +func (mlw *memoryLayerWriter) Write(p []byte) (int, error) { + if mlw.ml.expectedSize == 0 { + return 0, fmt.Errorf("Must set size before writing to layer") + } + wrote, err := mlw.buffer.Write(p) + mlw.ml.contents = mlw.buffer.Bytes() + return wrote, err +} + +func (mlw *memoryLayerWriter) Close() error { + mlw.ml.cond.L.Lock() + defer mlw.ml.cond.L.Unlock() + + return mlw.close() +} + +func (mlw *memoryLayerWriter) close() error { + mlw.ml.writing = false + mlw.ml.cond.Broadcast() + return nil +} + +func (mlw *memoryLayerWriter) CurrentSize() int { + return len(mlw.ml.contents) +} + +func (mlw *memoryLayerWriter) Size() int { + return mlw.ml.expectedSize +} + +func (mlw *memoryLayerWriter) SetSize(size int) error { + if !mlw.ml.writing { + return fmt.Errorf("Layer is closed for 
writing") + } + mlw.ml.expectedSize = size + return nil +} diff --git a/docs/client/pull.go b/docs/client/pull.go new file mode 100644 index 00000000..385158db --- /dev/null +++ b/docs/client/pull.go @@ -0,0 +1,151 @@ +package client + +import ( + "fmt" + "io" + + log "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/manifest" +) + +// simultaneousLayerPullWindow is the size of the parallel layer pull window. +// A layer may not be pulled until the layer preceeding it by the length of the +// pull window has been successfully pulled. +const simultaneousLayerPullWindow = 4 + +// Pull implements a client pull workflow for the image defined by the given +// name and tag pair, using the given ObjectStore for local manifest and layer +// storage +func Pull(c Client, objectStore ObjectStore, name, tag string) error { + manifest, err := c.GetImageManifest(name, tag) + if err != nil { + return err + } + log.WithField("manifest", manifest).Info("Pulled manifest") + + if len(manifest.FSLayers) != len(manifest.History) { + return fmt.Errorf("Length of history not equal to number of layers") + } + if len(manifest.FSLayers) == 0 { + return fmt.Errorf("Image has no layers") + } + + errChans := make([]chan error, len(manifest.FSLayers)) + for i := range manifest.FSLayers { + errChans[i] = make(chan error) + } + + // To avoid leak of goroutines we must notify + // pullLayer goroutines about a cancelation, + // otherwise they will lock forever. + cancelCh := make(chan struct{}) + + // Iterate over each layer in the manifest, simultaneously pulling no more + // than simultaneousLayerPullWindow layers at a time. If an error is + // received from a layer pull, we abort the push. + for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ { + dependentLayer := i - simultaneousLayerPullWindow + if dependentLayer >= 0 { + err := <-errChans[dependentLayer] + if err != nil { + log.WithField("error", err).Warn("Pull aborted") + close(cancelCh) + return err + } + } + + if i < len(manifest.FSLayers) { + go func(i int) { + select { + case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]): + case <-cancelCh: // no chance to recv until cancelCh's closed + } + }(i) + } + } + + err = objectStore.WriteManifest(name, tag, manifest) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "manifest": manifest, + }).Warn("Unable to write image manifest") + return err + } + + return nil +} + +func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { + log.WithField("layer", fsLayer).Info("Pulling layer") + + layer, err := objectStore.Layer(fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to write local layer") + return err + } + + layerWriter, err := layer.Writer() + if err == ErrLayerAlreadyExists { + log.WithField("layer", fsLayer).Info("Layer already exists") + return nil + } + if err == ErrLayerLocked { + log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") + layer.Wait() + return nil + } + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to write local layer") + return err + } + defer layerWriter.Close() + + if layerWriter.CurrentSize() > 0 { + log.WithFields(log.Fields{ + "layer": fsLayer, + "currentSize": layerWriter.CurrentSize(), + "size": layerWriter.Size(), + }).Info("Layer partially downloaded, resuming") + } + + layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, 
layerWriter.CurrentSize()) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to download layer") + return err + } + defer layerReader.Close() + + layerWriter.SetSize(layerWriter.CurrentSize() + length) + + _, err = io.Copy(layerWriter, layerReader) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to download layer") + return err + } + if layerWriter.CurrentSize() != layerWriter.Size() { + log.WithFields(log.Fields{ + "size": layerWriter.Size(), + "currentSize": layerWriter.CurrentSize(), + "layer": fsLayer, + }).Warn("Layer invalid size") + return fmt.Errorf( + "Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d", + fsLayer, layerWriter.Size(), layerWriter.CurrentSize(), + ) + } + return nil +} diff --git a/docs/client/push.go b/docs/client/push.go new file mode 100644 index 00000000..c26bd174 --- /dev/null +++ b/docs/client/push.go @@ -0,0 +1,137 @@ +package client + +import ( + "fmt" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/manifest" +) + +// simultaneousLayerPushWindow is the size of the parallel layer push window. +// A layer may not be pushed until the layer preceeding it by the length of the +// push window has been successfully pushed. +const simultaneousLayerPushWindow = 4 + +type pushFunction func(fsLayer manifest.FSLayer) error + +// Push implements a client push workflow for the image defined by the given +// name and tag pair, using the given ObjectStore for local manifest and layer +// storage +func Push(c Client, objectStore ObjectStore, name, tag string) error { + manifest, err := objectStore.Manifest(name, tag) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "name": name, + "tag": tag, + }).Info("No image found") + return err + } + + errChans := make([]chan error, len(manifest.FSLayers)) + for i := range manifest.FSLayers { + errChans[i] = make(chan error) + } + + cancelCh := make(chan struct{}) + + // Iterate over each layer in the manifest, simultaneously pushing no more + // than simultaneousLayerPushWindow layers at a time. If an error is + // received from a layer push, we abort the push. 
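+ // For example, with 6 layers and a window of 4, the loop below runs 10
+ // times: iteration 4 first waits on errChans[0] before starting layer 4,
+ // and the last 4 iterations start nothing, only draining the remaining
+ // channels.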
+ for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ { + dependentLayer := i - simultaneousLayerPushWindow + if dependentLayer >= 0 { + err := <-errChans[dependentLayer] + if err != nil { + log.WithField("error", err).Warn("Push aborted") + close(cancelCh) + return err + } + } + + if i < len(manifest.FSLayers) { + go func(i int) { + select { + case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]): + case <-cancelCh: // recv broadcast notification about cancelation + } + }(i) + } + } + + err = c.PutImageManifest(name, tag, manifest) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "manifest": manifest, + }).Warn("Unable to upload manifest") + return err + } + + return nil +} + +func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { + log.WithField("layer", fsLayer).Info("Pushing layer") + + layer, err := objectStore.Layer(fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + layerReader, err := layer.Reader() + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + defer layerReader.Close() + + if layerReader.CurrentSize() != layerReader.Size() { + log.WithFields(log.Fields{ + "layer": fsLayer, + "currentSize": layerReader.CurrentSize(), + "size": layerReader.Size(), + }).Warn("Local layer incomplete") + return fmt.Errorf("Local layer incomplete") + } + + length, err := c.BlobLength(name, fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to check existence of remote layer") + return err + } + if length >= 0 { + log.WithField("layer", fsLayer).Info("Layer already exists") + return nil + } + + location, err := c.InitiateBlobUpload(name) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + + err = c.UploadBlob(location, layerReader, int(layerReader.CurrentSize()), fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + + return nil +} From 71e7ac33cac7d71ffcf492e618cc3b7a139a7656 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 10 Feb 2015 17:41:09 -0800 Subject: [PATCH 022/501] Move storage package under registry package Signed-off-by: Stephen J Day --- docs/handlers/app.go | 4 +- docs/handlers/app_test.go | 2 +- docs/handlers/context.go | 2 +- docs/handlers/images.go | 2 +- docs/handlers/layer.go | 2 +- docs/handlers/layerupload.go | 2 +- docs/handlers/tags.go | 2 +- docs/storage/blobstore.go | 159 +++++++ docs/storage/cloudfrontlayerhandler.go | 122 ++++++ docs/storage/delegatelayerhandler.go | 94 ++++ docs/storage/doc.go | 3 + docs/storage/filereader.go | 201 +++++++++ docs/storage/filereader_test.go | 193 +++++++++ docs/storage/filewriter.go | 150 +++++++ docs/storage/filewriter_test.go | 148 +++++++ docs/storage/layer.go | 90 ++++ docs/storage/layer_test.go | 364 ++++++++++++++++ docs/storage/layerhandler.go | 50 +++ docs/storage/layerreader.go | 30 ++ docs/storage/layerstore.go | 168 +++++++ docs/storage/layerupload.go | 238 ++++++++++ docs/storage/manifeststore.go | 190 ++++++++ docs/storage/manifeststore_test.go | 233 ++++++++++ docs/storage/notifications/bridge.go | 156 +++++++ docs/storage/notifications/endpoint.go | 86 ++++ 
docs/storage/notifications/event.go | 154 +++++++ docs/storage/notifications/event_test.go | 145 +++++++ docs/storage/notifications/http.go | 145 +++++++ docs/storage/notifications/http_test.go | 155 +++++++ docs/storage/notifications/listener.go | 140 ++++++ docs/storage/notifications/listener_test.go | 153 +++++++ docs/storage/notifications/metrics.go | 152 +++++++ docs/storage/notifications/sinks.go | 337 ++++++++++++++ docs/storage/notifications/sinks_test.go | 223 ++++++++++ docs/storage/paths.go | 458 ++++++++++++++++++++ docs/storage/paths_test.go | 138 ++++++ docs/storage/registry.go | 80 ++++ docs/storage/revisionstore.go | 207 +++++++++ docs/storage/services.go | 84 ++++ docs/storage/tagstore.go | 157 +++++++ 40 files changed, 5411 insertions(+), 8 deletions(-) create mode 100644 docs/storage/blobstore.go create mode 100644 docs/storage/cloudfrontlayerhandler.go create mode 100644 docs/storage/delegatelayerhandler.go create mode 100644 docs/storage/doc.go create mode 100644 docs/storage/filereader.go create mode 100644 docs/storage/filereader_test.go create mode 100644 docs/storage/filewriter.go create mode 100644 docs/storage/filewriter_test.go create mode 100644 docs/storage/layer.go create mode 100644 docs/storage/layer_test.go create mode 100644 docs/storage/layerhandler.go create mode 100644 docs/storage/layerreader.go create mode 100644 docs/storage/layerstore.go create mode 100644 docs/storage/layerupload.go create mode 100644 docs/storage/manifeststore.go create mode 100644 docs/storage/manifeststore_test.go create mode 100644 docs/storage/notifications/bridge.go create mode 100644 docs/storage/notifications/endpoint.go create mode 100644 docs/storage/notifications/event.go create mode 100644 docs/storage/notifications/event_test.go create mode 100644 docs/storage/notifications/http.go create mode 100644 docs/storage/notifications/http_test.go create mode 100644 docs/storage/notifications/listener.go create mode 100644 docs/storage/notifications/listener_test.go create mode 100644 docs/storage/notifications/metrics.go create mode 100644 docs/storage/notifications/sinks.go create mode 100644 docs/storage/notifications/sinks_test.go create mode 100644 docs/storage/paths.go create mode 100644 docs/storage/paths_test.go create mode 100644 docs/storage/registry.go create mode 100644 docs/storage/revisionstore.go create mode 100644 docs/storage/services.go create mode 100644 docs/storage/tagstore.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 5f433e95..09c0c621 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -11,8 +11,8 @@ import ( "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/storage" - "github.com/docker/distribution/storage/notifications" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/notifications" "github.com/docker/distribution/storagedriver" "github.com/docker/distribution/storagedriver/factory" "github.com/gorilla/mux" diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 8da285a3..b27c788a 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/configuration" - "github.com/docker/distribution/storage" + "github.com/docker/distribution/registry/storage" 
"github.com/docker/distribution/storagedriver/inmemory" "golang.org/x/net/context" ) diff --git a/docs/handlers/context.go b/docs/handlers/context.go index c940d8f4..a49253ee 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -7,7 +7,7 @@ import ( "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storage" + "github.com/docker/distribution/registry/storage" "golang.org/x/net/context" ) diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 9c2dfa43..6a0e9a40 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -9,7 +9,7 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/storage" + "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index 73641dde..8214fbf0 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storage" + "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 190cf86b..83ef6fb6 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storage" + "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 1e8e43d5..0a764693 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -5,7 +5,7 @@ import ( "net/http" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/storage" + "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go new file mode 100644 index 00000000..ac123f44 --- /dev/null +++ b/docs/storage/blobstore.go @@ -0,0 +1,159 @@ +package storage + +import ( + "fmt" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/storagedriver" + "golang.org/x/net/context" +) + +// TODO(stevvooe): Currently, the blobStore implementation used by the +// manifest store. The layer store should be refactored to better leverage the +// blobStore, reducing duplicated code. + +// blobStore implements a generalized blob store over a driver, supporting the +// read side and link management. This object is intentionally a leaky +// abstraction, providing utility methods that support creating and traversing +// backend links. +type blobStore struct { + *registry + ctx context.Context +} + +// exists reports whether or not the path exists. If the driver returns error +// other than storagedriver.PathNotFound, an error may be returned. 
+func (bs *blobStore) exists(dgst digest.Digest) (bool, error) { + path, err := bs.path(dgst) + + if err != nil { + return false, err + } + + ok, err := exists(bs.driver, path) + if err != nil { + return false, err + } + + return ok, nil +} + +// get retrieves the blob by digest, returning it a byte slice. This should +// only be used for small objects. +func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) { + bp, err := bs.path(dgst) + if err != nil { + return nil, err + } + + return bs.driver.GetContent(bp) +} + +// link links the path to the provided digest by writing the digest into the +// target file. +func (bs *blobStore) link(path string, dgst digest.Digest) error { + if exists, err := bs.exists(dgst); err != nil { + return err + } else if !exists { + return fmt.Errorf("cannot link non-existent blob") + } + + // The contents of the "link" file are the exact string contents of the + // digest, which is specified in that package. + return bs.driver.PutContent(path, []byte(dgst)) +} + +// linked reads the link at path and returns the content. +func (bs *blobStore) linked(path string) ([]byte, error) { + linked, err := bs.readlink(path) + if err != nil { + return nil, err + } + + return bs.get(linked) +} + +// readlink returns the linked digest at path. +func (bs *blobStore) readlink(path string) (digest.Digest, error) { + content, err := bs.driver.GetContent(path) + if err != nil { + return "", err + } + + linked, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + if exists, err := bs.exists(linked); err != nil { + return "", err + } else if !exists { + return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked) + } + + return linked, nil +} + +// resolve reads the digest link at path and returns the blob store link. +func (bs *blobStore) resolve(path string) (string, error) { + dgst, err := bs.readlink(path) + if err != nil { + return "", err + } + + return bs.path(dgst) +} + +// put stores the content p in the blob store, calculating the digest. If the +// content is already present, only the digest will be returned. This should +// only be used for small objects, such as manifests. +func (bs *blobStore) put(p []byte) (digest.Digest, error) { + dgst, err := digest.FromBytes(p) + if err != nil { + ctxu.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p)) + return "", err + } + + bp, err := bs.path(dgst) + if err != nil { + return "", err + } + + // If the content already exists, just return the digest. + if exists, err := bs.exists(dgst); err != nil { + return "", err + } else if exists { + return dgst, nil + } + + return dgst, bs.driver.PutContent(bp, p) +} + +// path returns the canonical path for the blob identified by digest. The blob +// may or may not exist. 
+func (bs *blobStore) path(dgst digest.Digest) (string, error) {
+	bp, err := bs.pm.path(blobDataPathSpec{
+		digest: dgst,
+	})
+
+	if err != nil {
+		return "", err
+	}
+
+	return bp, nil
+}
+
+// exists provides a utility method to test whether or not a path exists
+// within the given driver.
+func exists(driver storagedriver.StorageDriver, path string) (bool, error) {
+	if _, err := driver.Stat(path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			return false, nil
+		default:
+			return false, err
+		}
+	}
+
+	return true, nil
+}
diff --git a/docs/storage/cloudfrontlayerhandler.go b/docs/storage/cloudfrontlayerhandler.go
new file mode 100644
index 00000000..fa420cc7
--- /dev/null
+++ b/docs/storage/cloudfrontlayerhandler.go
@@ -0,0 +1,122 @@
+package storage
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/AdRoll/goamz/cloudfront"
+	"github.com/docker/distribution/storagedriver"
+)
+
+// cloudFrontLayerHandler provides a simple implementation of layerHandler that
+// constructs temporary signed CloudFront URLs from the storagedriver layer URL,
+// then issues HTTP Temporary Redirects to this CloudFront content URL.
+type cloudFrontLayerHandler struct {
+	cloudfront           *cloudfront.CloudFront
+	delegateLayerHandler *delegateLayerHandler
+	duration             time.Duration
+}
+
+var _ LayerHandler = &cloudFrontLayerHandler{}
+
+// newCloudFrontLayerHandler constructs and returns a new CloudFront
+// LayerHandler implementation.
+// Required options: baseurl, privatekey, keypairid
+func newCloudFrontLayerHandler(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (LayerHandler, error) {
+	base, ok := options["baseurl"]
+	if !ok {
+		return nil, fmt.Errorf("No baseurl provided")
+	}
+	baseURL, ok := base.(string)
+	if !ok {
+		return nil, fmt.Errorf("baseurl must be a string")
+	}
+	pk, ok := options["privatekey"]
+	if !ok {
+		return nil, fmt.Errorf("No privatekey provided")
+	}
+	pkPath, ok := pk.(string)
+	if !ok {
+		return nil, fmt.Errorf("privatekey must be a string")
+	}
+	kpid, ok := options["keypairid"]
+	if !ok {
+		return nil, fmt.Errorf("No keypairid provided")
+	}
+	keypairID, ok := kpid.(string)
+	if !ok {
+		return nil, fmt.Errorf("keypairid must be a string")
+	}
+
+	pkBytes, err := ioutil.ReadFile(pkPath)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to read privatekey file: %s", err)
+	}
+
+	block, _ := pem.Decode([]byte(pkBytes))
+	if block == nil {
+		return nil, fmt.Errorf("Failed to decode private key as an rsa private key")
+	}
+	privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	lh, err := newDelegateLayerHandler(storageDriver, options)
+	if err != nil {
+		return nil, err
+	}
+	dlh := lh.(*delegateLayerHandler)
+
+	cf := cloudfront.New(baseURL, privateKey, keypairID)
+
+	duration := 20 * time.Minute
+	d, ok := options["duration"]
+	if ok {
+		switch d := d.(type) {
+		case time.Duration:
+			duration = d
+		case string:
+			dur, err := time.ParseDuration(d)
+			if err != nil {
+				return nil, fmt.Errorf("Invalid duration: %s", err)
+			}
+			duration = dur
+		}
+	}
+
+	return &cloudFrontLayerHandler{cloudfront: cf, delegateLayerHandler: dlh, duration: duration}, nil
+}
+
+// Resolve returns an http.Handler which can serve the contents of the given
+// Layer, or an error if not supported by the storagedriver.
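+//
+// On success, the returned handler issues an HTTP 307 to a canned signed
+// URL; the query parameters below belong to CloudFront and are shown only
+// for illustration:
+//
+//	https://<distribution>.cloudfront.net/<blob path>?Expires=...&Signature=...&Key-Pair-Id=...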
+func (lh *cloudFrontLayerHandler) Resolve(layer Layer) (http.Handler, error) { + layerURLStr, err := lh.delegateLayerHandler.urlFor(layer, nil) + if err != nil { + return nil, err + } + + layerURL, err := url.Parse(layerURLStr) + if err != nil { + return nil, err + } + + cfURL, err := lh.cloudfront.CannedSignedURL(layerURL.Path, "", time.Now().Add(lh.duration)) + if err != nil { + return nil, err + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, cfURL, http.StatusTemporaryRedirect) + }), nil +} + +// init registers the cloudfront layerHandler backend. +func init() { + RegisterLayerHandler("cloudfront", LayerHandlerInitFunc(newCloudFrontLayerHandler)) +} diff --git a/docs/storage/delegatelayerhandler.go b/docs/storage/delegatelayerhandler.go new file mode 100644 index 00000000..7ed6d87b --- /dev/null +++ b/docs/storage/delegatelayerhandler.go @@ -0,0 +1,94 @@ +package storage + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/distribution/storagedriver" +) + +// delegateLayerHandler provides a simple implementation of layerHandler that +// simply issues HTTP Temporary Redirects to the URL provided by the +// storagedriver for a given Layer. +type delegateLayerHandler struct { + storageDriver storagedriver.StorageDriver + pathMapper *pathMapper + duration time.Duration +} + +var _ LayerHandler = &delegateLayerHandler{} + +func newDelegateLayerHandler(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (LayerHandler, error) { + duration := 20 * time.Minute + d, ok := options["duration"] + if ok { + switch d := d.(type) { + case time.Duration: + duration = d + case string: + dur, err := time.ParseDuration(d) + if err != nil { + return nil, fmt.Errorf("Invalid duration: %s", err) + } + duration = dur + } + } + + return &delegateLayerHandler{storageDriver: storageDriver, pathMapper: defaultPathMapper, duration: duration}, nil +} + +// Resolve returns an http.Handler which can serve the contents of the given +// Layer, or an error if not supported by the storagedriver. +func (lh *delegateLayerHandler) Resolve(layer Layer) (http.Handler, error) { + // TODO(bbland): This is just a sanity check to ensure that the + // storagedriver supports url generation. It would be nice if we didn't have + // to do this twice for non-GET requests. + layerURL, err := lh.urlFor(layer, map[string]interface{}{"method": "GET"}) + if err != nil { + return nil, err + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + layerURL, err = lh.urlFor(layer, map[string]interface{}{"method": r.Method}) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + http.Redirect(w, r, layerURL, http.StatusTemporaryRedirect) + }), nil +} + +// urlFor returns a download URL for the given layer, or the empty string if +// unsupported. +func (lh *delegateLayerHandler) urlFor(layer Layer, options map[string]interface{}) (string, error) { + // Crack open the layer to get at the layerStore + layerRd, ok := layer.(*layerReader) + if !ok { + // TODO(stevvooe): We probably want to find a better way to get at the + // underlying filesystem path for a given layer. Perhaps, the layer + // handler should have its own layer store but right now, it is not + // request scoped. 
+ return "", fmt.Errorf("unsupported layer type: cannot resolve blob path: %v", layer) + } + + if options == nil { + options = make(map[string]interface{}) + } + options["expiry"] = time.Now().Add(lh.duration) + + layerURL, err := lh.storageDriver.URLFor(layerRd.path, options) + if err != nil { + return "", err + } + + return layerURL, nil +} + +// init registers the delegate layerHandler backend. +func init() { + RegisterLayerHandler("delegate", LayerHandlerInitFunc(newDelegateLayerHandler)) +} diff --git a/docs/storage/doc.go b/docs/storage/doc.go new file mode 100644 index 00000000..387d9234 --- /dev/null +++ b/docs/storage/doc.go @@ -0,0 +1,3 @@ +// Package storage contains storage services for use in the registry +// application. It should be considered an internal package, as of Go 1.4. +package storage diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go new file mode 100644 index 00000000..9bc09afe --- /dev/null +++ b/docs/storage/filereader.go @@ -0,0 +1,201 @@ +package storage + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/docker/distribution/storagedriver" +) + +// TODO(stevvooe): Set an optimal buffer size here. We'll have to +// understand the latency characteristics of the underlying network to +// set this correctly, so we may want to leave it to the driver. For +// out of process drivers, we'll have to optimize this buffer size for +// local communication. +const fileReaderBufferSize = 4 << 20 + +// remoteFileReader provides a read seeker interface to files stored in +// storagedriver. Used to implement part of layer interface and will be used +// to implement read side of LayerUpload. +type fileReader struct { + driver storagedriver.StorageDriver + + // identifying fields + path string + size int64 // size is the total layer size, must be set. + modtime time.Time + + // mutable fields + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 // offset is the current read offset + err error // terminal error, if set, reader is closed +} + +// newFileReader initializes a file reader for the remote file. The read takes +// on the offset and size at the time the reader is created. If the underlying +// file changes, one must create a new fileReader. +func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) { + rd := &fileReader{ + driver: driver, + path: path, + } + + // Grab the size of the layer file, ensuring existence. + if fi, err := driver.Stat(path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // NOTE(stevvooe): We really don't care if the file is not + // actually present for the reader. If the caller needs to know + // whether or not the file exists, they should issue a stat call + // on the path. There is still no guarantee, since the file may be + // gone by the time the reader is created. The only correct + // behavior is to return a reader that immediately returns EOF. + default: + // Any other error we want propagated up the stack. 
+			return nil, err
+		}
+	} else {
+		if fi.IsDir() {
+			return nil, fmt.Errorf("cannot read a directory")
+		}
+
+		// Fill in file information
+		rd.size = fi.Size()
+		rd.modtime = fi.ModTime()
+	}
+
+	return rd, nil
+}
+
+func (fr *fileReader) Read(p []byte) (n int, err error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	rd, err := fr.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	fr.offset += int64(n)
+
+	// Simulate io.EOF error if we reach the file size.
+	if err == nil && fr.offset >= fr.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	var err error
+	newOffset := fr.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = fr.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else {
+		if fr.offset != newOffset {
+			fr.reset()
+		}
+
+		// No problems, set the offset.
+		fr.offset = newOffset
+	}
+
+	return fr.offset, err
+}
+
+// Close the layer. Should be called when the resource is no longer needed.
+func (fr *fileReader) Close() error {
+	if fr.err != nil {
+		return fr.err
+	}
+
+	fr.err = ErrLayerClosed
+
+	// close and release reader chain
+	if fr.rc != nil {
+		fr.rc.Close()
+	}
+
+	fr.rc = nil
+	fr.brd = nil
+
+	return fr.err
+}
+
+// reader prepares the current reader at the current offset, ensuring it's
+// buffered and ready to go.
+func (fr *fileReader) reader() (io.Reader, error) {
+	if fr.err != nil {
+		return nil, fr.err
+	}
+
+	if fr.rc != nil {
+		return fr.brd, nil
+	}
+
+	// If we don't have a reader, open one up.
+	rc, err := fr.driver.ReadStream(fr.path, fr.offset)
+	if err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): If the path is not found, we simply return a
+			// reader that returns io.EOF. However, we do not set fr.rc,
+			// allowing future attempts at getting a reader to possibly
+			// succeed if the file turns up later.
+			return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+		default:
+			return nil, err
+		}
+	}
+
+	fr.rc = rc
+
+	if fr.brd == nil {
+		// TODO(stevvooe): Set an optimal buffer size here. We'll have to
+		// understand the latency characteristics of the underlying network to
+		// set this correctly, so we may want to leave it to the driver. For
+		// out of process drivers, we'll have to optimize this buffer size for
+		// local communication.
+		fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
+	} else {
+		fr.brd.Reset(fr.rc)
+	}
+
+	return fr.brd, nil
+}
+
+// reset resets the reader, forcing the read method to open up a new
+// connection and rebuild the buffered reader. This should be called when the
+// offset and the reader will become out of sync, such as during a seek
+// operation.
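+//
+// Note that reset only tears down the remote reader; fr.offset is left
+// untouched, so the next call to reader() transparently reopens the stream
+// at the current offset.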
+func (fr *fileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go new file mode 100644 index 00000000..53dd6c9a --- /dev/null +++ b/docs/storage/filereader_test.go @@ -0,0 +1,193 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "io" + mrand "math/rand" + "os" + "testing" + + "github.com/docker/distribution/digest" + + "github.com/docker/distribution/storagedriver/inmemory" +) + +func TestSimpleRead(t *testing.T) { + content := make([]byte, 1<<20) + n, err := rand.Read(content) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + + if n != len(content) { + t.Fatalf("random read did't fill buffer") + } + + dgst, err := digest.FromReader(bytes.NewReader(content)) + if err != nil { + t.Fatalf("unexpected error digesting random content: %v", err) + } + + driver := inmemory.New() + path := "/random" + + if err := driver.PutContent(path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(driver, path) + if err != nil { + t.Fatalf("error allocating file reader: %v", err) + } + + verifier := digest.NewDigestVerifier(dgst) + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify read data") + } +} + +func TestFileReaderSeek(t *testing.T) { + driver := inmemory.New() + pattern := "01234567890ab" // prime length block + repititions := 1024 + path := "/patterned" + content := bytes.Repeat([]byte(pattern), repititions) + + if err := driver.PutContent(path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(driver, path) + + if err != nil { + t.Fatalf("unexpected error creating file reader: %v", err) + } + + // Seek all over the place, in blocks of pattern size and make sure we get + // the right data. + for _, repitition := range mrand.Perm(repititions - 1) { + targetOffset := int64(len(pattern) * repitition) + // Seek to a multiple of pattern size and read pattern size bytes + offset, err := fr.Seek(targetOffset, os.SEEK_SET) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if offset != targetOffset { + t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) + } + + p := make([]byte, len(pattern)) + + n, err := fr.Read(p) + if err != nil { + t.Fatalf("error reading pattern: %v", err) + } + + if n != len(pattern) { + t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) + } + + if string(p) != pattern { + t.Fatalf("incorrect read content: %q != %q", p, pattern) + } + + // Check offset + current, err := fr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if current != targetOffset+int64(len(pattern)) { + t.Fatalf("unexpected offset after read: %v", err) + } + } + + start, err := fr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking to start: %v", err) + } + + if start != 0 { + t.Fatalf("expected to seek to start: %v != 0", start) + } + + end, err := fr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("expected to seek to end: %v != %v", end, len(content)) + } + + // 4. Seek before start, ensure error. + + // seek before start + before, err := fr.Seek(-1, os.SEEK_SET) + if err == nil { + t.Fatalf("error expected, returned offset=%v", before) + } + + // 5. 
Seek after end, ensure no error.
+	after, err := fr.Seek(1, os.SEEK_END)
+	if err != nil {
+		t.Fatalf("unexpected error seeking past end: offset=%v, err=%v", after, err)
+	}
+
+	p := make([]byte, 16)
+	n, err := fr.Read(p)
+
+	if n != 0 {
+		t.Fatalf("bytes read %d != %d", n, 0)
+	}
+
+	if err != io.EOF {
+		t.Fatalf("expected io.EOF, got %v", err)
+	}
+}
+
+// TestFileReaderNonExistentFile ensures the reader behaves as expected with a
+// missing or zero-length remote file. While the file may not exist, the
+// reader should not error out on creation and should return 0-bytes from the
+// read method, with an io.EOF error.
+func TestFileReaderNonExistentFile(t *testing.T) {
+	driver := inmemory.New()
+	fr, err := newFileReader(driver, "/doesnotexist")
+	if err != nil {
+		t.Fatalf("unexpected error initializing reader: %v", err)
+	}
+
+	var buf [1024]byte
+
+	n, err := fr.Read(buf[:])
+	if n != 0 {
+		t.Fatalf("non-zero byte read reported: %d != 0", n)
+	}
+
+	if err != io.EOF {
+		t.Fatalf("read on missing file should return io.EOF, got %v", err)
+	}
+}
+
+// TestFileReaderErrors covers the various error return types for different
+// conditions that can arise when reading a layer.
+func TestFileReaderErrors(t *testing.T) {
+	// TODO(stevvooe): We need to cover error return types, driven by the
+	// errors returned via the HTTP API. For now, here is an incomplete list:
+	//
+	// 	1. Layer Not Found: returned when layer is not found or access is
+	//	   denied.
+	//	2. Layer Unavailable: returned when link references are unresolved,
+	//	   but layer is known to the registry.
+	//	3. Layer Invalid: This may be split into more errors, but should be
+	//	   returned when name or tarsum does not reference a valid layer. We
+	//	   may also need something to communicate layer verification errors
+	//	   for the inline tarsum check.
+	//	4. Timeout: timeouts to backend. Need to better understand these
+	//	   failure cases and how the storage driver propagates these errors
+	//	   up the stack.
+}
diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go
new file mode 100644
index 00000000..5037f160
--- /dev/null
+++ b/docs/storage/filewriter.go
@@ -0,0 +1,150 @@
+package storage
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/docker/distribution/storagedriver"
+)
+
+// fileWriter implements a remote file writer backed by a storage driver.
+type fileWriter struct {
+	driver storagedriver.StorageDriver
+
+	// identifying fields
+	path string
+
+	// mutable fields
+	size   int64 // size of the file, aka the current end
+	offset int64 // offset is the current write offset
+	err    error // terminal error, if set, writer is closed
+}
+
+// fileWriterInterface makes the desired io compliant interface that the
+// filewriter should implement.
+type fileWriterInterface interface {
+	io.WriteSeeker
+	io.WriterAt
+	io.ReaderFrom
+	io.Closer
+}
+
+var _ fileWriterInterface = &fileWriter{}
+
+// newFileWriter returns a prepared fileWriter for the driver and path. This
+// could be considered similar to an "open" call on a regular filesystem.
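+//
+// A minimal usage sketch (illustrative; driver is any StorageDriver):
+//
+//	fw, err := newFileWriter(driver, "/some/path")
+//	if err != nil {
+//		return err
+//	}
+//	defer fw.Close()
+//	_, err = fw.Write([]byte("hello"))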
+func newFileWriter(driver storagedriver.StorageDriver, path string) (*fileWriter, error) { + fw := fileWriter{ + driver: driver, + path: path, + } + + if fi, err := driver.Stat(path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // ignore, offset is zero + default: + return nil, err + } + } else { + if fi.IsDir() { + return nil, fmt.Errorf("cannot write to a directory") + } + + fw.size = fi.Size() + } + + return &fw, nil +} + +// Write writes the buffer p at the current write offset. +func (fw *fileWriter) Write(p []byte) (n int, err error) { + nn, err := fw.readFromAt(bytes.NewReader(p), -1) + return int(nn), err +} + +// WriteAt writes p at the specified offset. The underlying offset does not +// change. +func (fw *fileWriter) WriteAt(p []byte, offset int64) (n int, err error) { + nn, err := fw.readFromAt(bytes.NewReader(p), offset) + return int(nn), err +} + +// ReadFrom reads reader r until io.EOF writing the contents at the current +// offset. +func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { + return fw.readFromAt(r, -1) +} + +// Seek moves the write position do the requested offest based on the whence +// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET. +func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { + if fw.err != nil { + return 0, fw.err + } + + var err error + newOffset := fw.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = fw.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + // No problems, set the offset. + fw.offset = newOffset + } + + return fw.offset, err +} + +// Close closes the fileWriter for writing. +func (fw *fileWriter) Close() error { + if fw.err != nil { + return fw.err + } + + fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) + + return fw.err +} + +// readFromAt writes to fw from r at the specified offset. If offset is less +// than zero, the value of fw.offset is used and updated after the operation. +func (fw *fileWriter) readFromAt(r io.Reader, offset int64) (n int64, err error) { + if fw.err != nil { + return 0, fw.err + } + + var updateOffset bool + if offset < 0 { + offset = fw.offset + updateOffset = true + } + + nn, err := fw.driver.WriteStream(fw.path, offset, r) + + if updateOffset { + // We should forward the offset, whether or not there was an error. + // Basically, we keep the filewriter in sync with the reader's head. If an + // error is encountered, the whole thing should be retried but we proceed + // from an expected offset, even if the data didn't make it to the + // backend. + fw.offset += nn + + if fw.offset > fw.size { + fw.size = fw.offset + } + } + + return nn, err +} diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go new file mode 100644 index 00000000..2235462f --- /dev/null +++ b/docs/storage/filewriter_test.go @@ -0,0 +1,148 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "io" + "os" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/storagedriver/inmemory" +) + +// TestSimpleWrite takes the fileWriter through common write operations +// ensuring data integrity. 
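+// It exercises Write, WriteAt, Seek and a fileReader-to-fileWriter copy in
+// sequence, verifying content digests after each step.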
+func TestSimpleWrite(t *testing.T) { + content := make([]byte, 1<<20) + n, err := rand.Read(content) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + + if n != len(content) { + t.Fatalf("random read did't fill buffer") + } + + dgst, err := digest.FromReader(bytes.NewReader(content)) + if err != nil { + t.Fatalf("unexpected error digesting random content: %v", err) + } + + driver := inmemory.New() + path := "/random" + + fw, err := newFileWriter(driver, path) + if err != nil { + t.Fatalf("unexpected error creating fileWriter: %v", err) + } + defer fw.Close() + + n, err = fw.Write(content) + if err != nil { + t.Fatalf("unexpected error writing content: %v", err) + } + + if n != len(content) { + t.Fatalf("unexpected write length: %d != %d", n, len(content)) + } + + fr, err := newFileReader(driver, path) + if err != nil { + t.Fatalf("unexpected error creating fileReader: %v", err) + } + defer fr.Close() + + verifier := digest.NewDigestVerifier(dgst) + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify write data") + } + + // Check the seek position is equal to the content length + end, err := fw.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("write did not advance offset: %d != %d", end, len(content)) + } + + // Double the content, but use the WriteAt method + doubled := append(content, content...) + doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) + if err != nil { + t.Fatalf("unexpected error digesting doubled content: %v", err) + } + + n, err = fw.WriteAt(content, end) + if err != nil { + t.Fatalf("unexpected error writing content at %d: %v", end, err) + } + + if n != len(content) { + t.Fatalf("writeat was short: %d != %d", n, len(content)) + } + + fr, err = newFileReader(driver, path) + if err != nil { + t.Fatalf("unexpected error creating fileReader: %v", err) + } + defer fr.Close() + + verifier = digest.NewDigestVerifier(doubledgst) + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify write data") + } + + // Check that WriteAt didn't update the offset. + end, err = fw.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("write did not advance offset: %d != %d", end, len(content)) + } + + // Now, we copy from one path to another, running the data through the + // fileReader to fileWriter, rather than the driver.Move command to ensure + // everything is working correctly. 
+ fr, err = newFileReader(driver, path) + if err != nil { + t.Fatalf("unexpected error creating fileReader: %v", err) + } + defer fr.Close() + + fw, err = newFileWriter(driver, "/copied") + if err != nil { + t.Fatalf("unexpected error creating fileWriter: %v", err) + } + defer fw.Close() + + nn, err := io.Copy(fw, fr) + if err != nil { + t.Fatalf("unexpected error copying data: %v", err) + } + + if nn != int64(len(doubled)) { + t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) + } + + fr, err = newFileReader(driver, "/copied") + if err != nil { + t.Fatalf("unexpected error creating fileReader: %v", err) + } + defer fr.Close() + + verifier = digest.NewDigestVerifier(doubledgst) + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify write data") + } +} diff --git a/docs/storage/layer.go b/docs/storage/layer.go new file mode 100644 index 00000000..5e12f43e --- /dev/null +++ b/docs/storage/layer.go @@ -0,0 +1,90 @@ +package storage + +import ( + "fmt" + "io" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// Layer provides a readable and seekable layer object. Typically, +// implementations are *not* goroutine safe. +type Layer interface { + // http.ServeContent requires an efficient implementation of + // ReadSeeker.Seek(0, os.SEEK_END). + io.ReadSeeker + io.Closer + + // Name returns the repository under which this layer is linked. + Name() string // TODO(stevvooe): struggling with nomenclature: should this be "repo" or "name"? + + // Digest returns the unique digest of the blob, which is the tarsum for + // layers. + Digest() digest.Digest + + // CreatedAt returns the time this layer was created. + CreatedAt() time.Time +} + +// LayerUpload provides a handle for working with in-progress uploads. +// Instances can be obtained from the LayerService.Upload and +// LayerService.Resume. +type LayerUpload interface { + io.WriteSeeker + io.ReaderFrom + io.Closer + + // Name of the repository under which the layer will be linked. + Name() string + + // UUID returns the identifier for this upload. + UUID() string + + // StartedAt returns the time this layer upload was started. + StartedAt() time.Time + + // Finish marks the upload as completed, returning a valid handle to the + // uploaded layer. The digest is validated against the contents of the + // uploaded layer. + Finish(digest digest.Digest) (Layer, error) + + // Cancel the layer upload process. + Cancel() error +} + +var ( + // ErrLayerExists returned when layer already exists + ErrLayerExists = fmt.Errorf("layer exists") + + // ErrLayerTarSumVersionUnsupported when tarsum is unsupported version. + ErrLayerTarSumVersionUnsupported = fmt.Errorf("unsupported tarsum version") + + // ErrLayerUploadUnknown returned when upload is not found. + ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown") + + // ErrLayerClosed returned when an operation is attempted on a closed + // Layer or LayerUpload. + ErrLayerClosed = fmt.Errorf("layer closed") +) + +// ErrUnknownLayer returned when layer cannot be found. +type ErrUnknownLayer struct { + FSLayer manifest.FSLayer +} + +func (err ErrUnknownLayer) Error() string { + return fmt.Sprintf("unknown layer %v", err.FSLayer.BlobSum) +} + +// ErrLayerInvalidDigest returned when tarsum check fails. 
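+//
+// Callers typically match on the concrete type (sketch):
+//
+//	switch err := err.(type) {
+//	case ErrLayerInvalidDigest:
+//		// reject the upload; err.Reason carries the detail
+//	}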
+type ErrLayerInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrLayerInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go new file mode 100644 index 00000000..c7d64b79 --- /dev/null +++ b/docs/storage/layer_test.go @@ -0,0 +1,364 @@ +package storage + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/storagedriver" + "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/testutil" + "golang.org/x/net/context" +) + +// TestSimpleLayerUpload covers the layer upload process, exercising common +// error paths that might be seen during an upload. +func TestSimpleLayerUpload(t *testing.T) { + randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + dgst := digest.Digest(tarSumStr) + + if err != nil { + t.Fatalf("error allocating upload store: %v", err) + } + + ctx := context.Background() + imageName := "foo/bar" + driver := inmemory.New() + registry := NewRegistryWithDriver(driver) + ls := registry.Repository(ctx, imageName).Layers() + + h := sha256.New() + rd := io.TeeReader(randomDataReader, h) + + layerUpload, err := ls.Upload() + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Cancel the upload then restart it + if err := layerUpload.Cancel(); err != nil { + t.Fatalf("unexpected error during upload cancellation: %v", err) + } + + // Do a resume, get unknown upload + layerUpload, err = ls.Resume(layerUpload.UUID()) + if err != ErrLayerUploadUnknown { + t.Fatalf("unexpected error resuming upload, should be unkown: %v", err) + } + + // Restart! + layerUpload, err = ls.Upload() + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(layerUpload, rd) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("layer data write incomplete") + } + + offset, err := layerUpload.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("unexpected error seeking layer upload: %v", err) + } + + if offset != nn { + t.Fatalf("layerUpload not updated with correct offset: %v != %v", offset, nn) + } + layerUpload.Close() + + // Do a resume, for good fun + layerUpload, err = ls.Resume(layerUpload.UUID()) + if err != nil { + t.Fatalf("unexpected error resuming upload: %v", err) + } + + sha256Digest := digest.NewDigest("sha256", h) + layer, err := layerUpload.Finish(dgst) + + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // After finishing an upload, it should no longer exist. + if _, err := ls.Resume(layerUpload.UUID()); err != ErrLayerUploadUnknown { + t.Fatalf("expected layer upload to be unknown, got %v", err) + } + + // Test for existence. 
+ exists, err := ls.Exists(layer.Digest()) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v", err) + } + + if !exists { + t.Fatalf("layer should now exist") + } + + h.Reset() + nn, err = io.Copy(h, layer) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != sha256Digest { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) + } +} + +// TestSimpleLayerRead just creates a simple layer file and ensures that basic +// open, read, seek, read works. More specific edge cases should be covered in +// other tests. +func TestSimpleLayerRead(t *testing.T) { + ctx := context.Background() + imageName := "foo/bar" + driver := inmemory.New() + registry := NewRegistryWithDriver(driver) + ls := registry.Repository(ctx, imageName).Layers() + + randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random data: %v", err) + } + + dgst := digest.Digest(tarSumStr) + + // Test for existence. + exists, err := ls.Exists(dgst) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v", err) + } + + if exists { + t.Fatalf("layer should not exist") + } + + // Try to get the layer and make sure we get a not found error + layer, err := ls.Fetch(dgst) + if err == nil { + t.Fatalf("error expected fetching unknown layer") + } + + switch err.(type) { + case ErrUnknownLayer: + err = nil + default: + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + randomLayerDigest, err := writeTestLayer(driver, ls.(*layerStore).repository.pm, imageName, dgst, randomLayerReader) + if err != nil { + t.Fatalf("unexpected error writing test layer: %v", err) + } + + randomLayerSize, err := seekerSize(randomLayerReader) + if err != nil { + t.Fatalf("error getting seeker size for random layer: %v", err) + } + + layer, err = ls.Fetch(dgst) + if err != nil { + t.Fatal(err) + } + defer layer.Close() + + // Now check the sha digest and ensure its the same + h := sha256.New() + nn, err := io.Copy(h, layer) + if err != nil && err != io.EOF { + t.Fatalf("unexpected error copying to hash: %v", err) + } + + if nn != randomLayerSize { + t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize) + } + + sha256Digest := digest.NewDigest("sha256", h) + if sha256Digest != randomLayerDigest { + t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest) + } + + // Now seek back the layer, read the whole thing and check against randomLayerData + offset, err := layer.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking layer: %v", err) + } + + if offset != 0 { + t.Fatalf("seek failed: expected 0 offset, got %d", offset) + } + + p, err := ioutil.ReadAll(layer) + if err != nil { + t.Fatalf("error reading all of layer: %v", err) + } + + if len(p) != int(randomLayerSize) { + t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize) + } + + // Reset the randomLayerReader and read back the buffer + _, err = randomLayerReader.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error resetting layer reader: %v", err) + } + + randomLayerData, err := ioutil.ReadAll(randomLayerReader) + if err != nil { + t.Fatalf("random layer read failed: %v", err) + } + + if !bytes.Equal(p, randomLayerData) { + t.Fatalf("layer data not equal") + } +} + +// TestLayerUploadZeroLength uploads zero-length +func 
TestLayerUploadZeroLength(t *testing.T) {
+	ctx := context.Background()
+	imageName := "foo/bar"
+	driver := inmemory.New()
+	registry := NewRegistryWithDriver(driver)
+	ls := registry.Repository(ctx, imageName).Layers()
+
+	upload, err := ls.Upload()
+	if err != nil {
+		t.Fatalf("unexpected error starting upload: %v", err)
+	}
+
+	if _, err := io.Copy(upload, bytes.NewReader([]byte{})); err != nil {
+		t.Fatalf("unexpected error copying empty content: %v", err)
+	}
+
+	dgst, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	if err != nil {
+		t.Fatalf("error getting zero digest: %v", err)
+	}
+
+	if dgst != digest.DigestTarSumV1EmptyTar {
+		// sanity check on zero digest
+		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
+	}
+
+	layer, err := upload.Finish(dgst)
+	if err != nil {
+		t.Fatalf("unexpected error finishing upload: %v", err)
+	}
+
+	if layer.Digest() != dgst {
+		t.Fatalf("unexpected digest: %v != %v", layer.Digest(), dgst)
+	}
+}
+
+// writeRandomLayer creates a random layer under name and tarSum using driver
+// and pathMapper. An io.ReadSeeker with the data is returned, along with the
+// sha256 hex digest.
+func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) {
+	reader, tarSumStr, err := testutil.CreateRandomTarFile()
+	if err != nil {
+		return nil, "", "", err
+	}
+
+	tarSum = digest.Digest(tarSumStr)
+
+	// Now, actually create the layer.
+	randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader))
+
+	if _, err := reader.Seek(0, os.SEEK_SET); err != nil {
+		return nil, "", "", err
+	}
+
+	return reader, tarSum, randomLayerDigest, err
+}
+
+// seekerSize seeks to the end of seeker, checks the size and returns it to
+// the original state, returning the size. The state of the seeker should be
+// treated as unknown if an error is returned.
+func seekerSize(seeker io.ReadSeeker) (int64, error) {
+	current, err := seeker.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return 0, err
+	}
+
+	end, err := seeker.Seek(0, os.SEEK_END)
+	if err != nil {
+		return 0, err
+	}
+
+	resumed, err := seeker.Seek(current, os.SEEK_SET)
+	if err != nil {
+		return 0, err
+	}
+
+	if resumed != current {
+		return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location")
+	}
+
+	return end, nil
+}
+
+// writeTestLayer creates a simple test layer in the provided driver under
+// tarsum dgst, returning the sha256 digest location. This is implemented
+// piecemeal and should probably be replaced by the uploader when it's ready.
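+//
+// Conceptually (the concrete layout is owned by the pathMapper, so this is
+// only a sketch), the helper writes two entries:
+//
+//	blobDataPathSpec{digest: dgst}              -> the blob bytes
+//	layerLinkPathSpec{name: name, digest: dgst} -> a link file containing dgst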
+func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
+	h := sha256.New()
+	rd := io.TeeReader(content, h)
+
+	p, err := ioutil.ReadAll(rd)
+	if err != nil {
+		return "", err
+	}
+
+	blobDigestSHA := digest.NewDigest("sha256", h)
+
+	blobPath, err := pathMapper.path(blobDataPathSpec{
+		digest: dgst,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	if err := driver.PutContent(blobPath, p); err != nil {
+		return "", err
+	}
+
+	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
+		name:   name,
+		digest: dgst,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {
+		return "", err
+	}
+
+	return blobDigestSHA, nil
+}
diff --git a/docs/storage/layerhandler.go b/docs/storage/layerhandler.go
new file mode 100644
index 00000000..2755470e
--- /dev/null
+++ b/docs/storage/layerhandler.go
@@ -0,0 +1,50 @@
+package storage
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/docker/distribution/storagedriver"
+)
+
+// LayerHandler provides middleware for serving the contents of a Layer.
+type LayerHandler interface {
+	// Resolve returns an http.Handler which can serve the contents of a given
+	// Layer if possible, or nil and an error when unsupported. This may
+	// directly serve the contents of the layer or issue a redirect to another
+	// URL hosting the content.
+	Resolve(layer Layer) (http.Handler, error)
+}
+
+// LayerHandlerInitFunc is the type of a LayerHandler factory function and is
+// used to register the constructor for different LayerHandler backends.
+type LayerHandlerInitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (LayerHandler, error)
+
+var layerHandlers map[string]LayerHandlerInitFunc
+
+// RegisterLayerHandler is used to register a LayerHandlerInitFunc for
+// a LayerHandler backend with the given name.
+func RegisterLayerHandler(name string, initFunc LayerHandlerInitFunc) error {
+	if layerHandlers == nil {
+		layerHandlers = make(map[string]LayerHandlerInitFunc)
+	}
+	if _, exists := layerHandlers[name]; exists {
+		return fmt.Errorf("name already registered: %s", name)
+	}
+
+	layerHandlers[name] = initFunc
+
+	return nil
+}
+
+// GetLayerHandler constructs a LayerHandler
+// with the given options using the named backend.
+func GetLayerHandler(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (LayerHandler, error) {
+	if layerHandlers != nil {
+		if initFunc, exists := layerHandlers[name]; exists {
+			return initFunc(storageDriver, options)
+		}
+	}
+
+	return nil, fmt.Errorf("no layer handler registered with name: %s", name)
+}
diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go
new file mode 100644
index 00000000..4510dd7d
--- /dev/null
+++ b/docs/storage/layerreader.go
@@ -0,0 +1,30 @@
+package storage
+
+import (
+	"time"
+
+	"github.com/docker/distribution/digest"
+)
+
+// layerReader implements Layer and provides facilities for reading and
+// seeking.
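+//
+// It composes fileReader for Read/Seek/Close and adds only the identifying
+// metadata needed to satisfy the Layer interface.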
+type layerReader struct { + fileReader + + name string // repo name of this layer + digest digest.Digest +} + +var _ Layer = &layerReader{} + +func (lrs *layerReader) Name() string { + return lrs.name +} + +func (lrs *layerReader) Digest() digest.Digest { + return lrs.digest +} + +func (lrs *layerReader) CreatedAt() time.Time { + return lrs.modtime +} diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go new file mode 100644 index 00000000..b6578792 --- /dev/null +++ b/docs/storage/layerstore.go @@ -0,0 +1,168 @@ +package storage + +import ( + "time" + + "code.google.com/p/go-uuid/uuid" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/storagedriver" +) + +type layerStore struct { + repository *repository +} + +func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { + ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists") + + // Because this implementation just follows blob links, an existence check + // is pretty cheap by starting and closing a fetch. + _, err := ls.Fetch(digest) + + if err != nil { + switch err.(type) { + case ErrUnknownLayer: + return false, nil + } + + return false, err + } + + return true, nil +} + +func (ls *layerStore) Fetch(dgst digest.Digest) (Layer, error) { + ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch") + bp, err := ls.path(dgst) + if err != nil { + return nil, err + } + + fr, err := newFileReader(ls.repository.driver, bp) + if err != nil { + return nil, err + } + + return &layerReader{ + fileReader: *fr, + name: ls.repository.Name(), + digest: dgst, + }, nil +} + +// Upload begins a layer upload, returning a handle. If the layer upload +// is already in progress or the layer has already been uploaded, this +// will return an error. +func (ls *layerStore) Upload() (LayerUpload, error) { + ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload") + + // NOTE(stevvooe): Consider the issues with allowing concurrent upload of + // the same two layers. Should it be disallowed? For now, we allow both + // parties to proceed and the the first one uploads the layer. + + uuid := uuid.New() + startedAt := time.Now().UTC() + + path, err := ls.repository.registry.pm.path(uploadDataPathSpec{ + name: ls.repository.Name(), + uuid: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ + name: ls.repository.Name(), + uuid: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return ls.newLayerUpload(uuid, path, startedAt) +} + +// Resume continues an in progress layer upload, returning the current +// state of the upload. 
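+//
+// A hedged usage sketch:
+//
+//	upload, err := ls.Resume(uuid)
+//	if err == ErrLayerUploadUnknown {
+//		// no such upload; start over with ls.Upload()
+//	}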
+func (ls *layerStore) Resume(uuid string) (LayerUpload, error) { + ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume") + startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ + name: ls.repository.Name(), + uuid: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return nil, ErrLayerUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := ls.repository.pm.path(uploadDataPathSpec{ + name: ls.repository.Name(), + uuid: uuid, + }) + + if err != nil { + return nil, err + } + + return ls.newLayerUpload(uuid, path, startedAt) +} + +// newLayerUpload allocates a new upload controller with the given state. +func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (LayerUpload, error) { + fw, err := newFileWriter(ls.repository.driver, path) + if err != nil { + return nil, err + } + + return &layerUploadController{ + layerStore: ls, + uuid: uuid, + startedAt: startedAt, + fileWriter: *fw, + }, nil +} + +func (ls *layerStore) path(dgst digest.Digest) (string, error) { + // We must traverse this path through the link to enforce ownership. + layerLinkPath, err := ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) + if err != nil { + return "", err + } + + blobPath, err := ls.repository.blobStore.resolve(layerLinkPath) + + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + return "", ErrUnknownLayer{manifest.FSLayer{BlobSum: dgst}} + default: + return "", err + } + } + + return blobPath, nil +} diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go new file mode 100644 index 00000000..54860913 --- /dev/null +++ b/docs/storage/layerupload.go @@ -0,0 +1,238 @@ +package storage + +import ( + "fmt" + "io" + "path" + "time" + + "github.com/Sirupsen/logrus" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/storagedriver" + "github.com/docker/docker/pkg/tarsum" +) + +// layerUploadController is used to control the various aspects of resumable +// layer upload. It implements the LayerUpload interface. +type layerUploadController struct { + layerStore *layerStore + + uuid string + startedAt time.Time + + fileWriter +} + +var _ LayerUpload = &layerUploadController{} + +// Name of the repository under which the layer will be linked. +func (luc *layerUploadController) Name() string { + return luc.layerStore.repository.Name() +} + +// UUID returns the identifier for this upload. +func (luc *layerUploadController) UUID() string { + return luc.uuid +} + +func (luc *layerUploadController) StartedAt() time.Time { + return luc.startedAt +} + +// Finish marks the upload as completed, returning a valid handle to the +// uploaded layer. The final size and checksum are validated against the +// contents of the uploaded layer. The checksum should be provided in the +// format :. +func (luc *layerUploadController) Finish(digest digest.Digest) (Layer, error) { + ctxu.GetLogger(luc.layerStore.repository.ctx).Debug("(*layerUploadController).Finish") + canonical, err := luc.validateLayer(digest) + if err != nil { + return nil, err + } + + if err := luc.moveLayer(canonical); err != nil { + // TODO(stevvooe): Cleanup? 
+ return nil, err + } + + // Link the layer blob into the repository. + if err := luc.linkLayer(canonical); err != nil { + return nil, err + } + + if err := luc.removeResources(); err != nil { + return nil, err + } + + return luc.layerStore.Fetch(canonical) +} + +// Cancel the layer upload process. +func (luc *layerUploadController) Cancel() error { + ctxu.GetLogger(luc.layerStore.repository.ctx).Debug("(*layerUploadController).Cancel") + if err := luc.removeResources(); err != nil { + return err + } + + luc.Close() + return nil +} + +// validateLayer checks the layer data against the digest, returning an error +// if it does not match. The canonical digest is returned. +func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest, error) { + // First, check the incoming tarsum version of the digest. + version, err := tarsum.GetVersionFromTarsum(dgst.String()) + if err != nil { + return "", err + } + + // TODO(stevvooe): Should we push this down into the digest type? + switch version { + case tarsum.Version1: + default: + // version 0 and dev, for now. + return "", ErrLayerInvalidDigest{ + Digest: dgst, + Reason: ErrLayerTarSumVersionUnsupported, + } + } + + digestVerifier := digest.NewDigestVerifier(dgst) + + // TODO(stevvooe): Store resumable hash calculations in upload directory + // in driver. Something like a file at path /resumablehash/ + // with the hash state up to that point would be perfect. The hasher would + // then only have to fetch the difference. + + // Read the file from the backend driver and validate it. + fr, err := newFileReader(luc.fileWriter.driver, luc.path) + if err != nil { + return "", err + } + + tr := io.TeeReader(fr, digestVerifier) + + // TODO(stevvooe): This is one of the places we need a Digester write + // sink. Instead, its read driven. This might be okay. + + // Calculate an updated digest with the latest version. + canonical, err := digest.FromTarArchive(tr) + if err != nil { + return "", err + } + + if !digestVerifier.Verified() { + return "", ErrLayerInvalidDigest{ + Digest: dgst, + Reason: fmt.Errorf("content does not match digest"), + } + } + + return canonical, nil +} + +// moveLayer moves the data into its final, hash-qualified destination, +// identified by dgst. The layer should be validated before commencing the +// move. +func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { + blobPath, err := luc.layerStore.repository.registry.pm.path(blobDataPathSpec{ + digest: dgst, + }) + + if err != nil { + return err + } + + // Check for existence + if _, err := luc.driver.Stat(blobPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // ensure that it doesn't exist. + default: + return err + } + } else { + // If the path exists, we can assume that the content has already + // been uploaded, since the blob storage is content-addressable. + // While it may be corrupted, detection of such corruption belongs + // elsewhere. + return nil + } + + // If no data was received, we may not actually have a file on disk. Check + // the size here and write a zero-length file to blobPath if this is the + // case. For the most part, this should only ever happen with zero-length + // tars. 
+ if _, err := luc.driver.Stat(luc.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // HACK(stevvooe): This is slightly dangerous: if we verify above, + // get a hash, then the underlying file is deleted, we risk moving + // a zero-length blob into a nonzero-length blob location. To + // prevent this horrid thing, we employ the hack of only allowing + // to this happen for the zero tarsum. + if dgst == digest.DigestTarSumV1EmptyTar { + return luc.driver.PutContent(blobPath, []byte{}) + } + + // We let this fail during the move below. + logrus. + WithField("upload.uuid", luc.UUID()). + WithField("digest", dgst).Warnf("attempted to move zero-length content with non-zero digest") + default: + return err // unrelated error + } + } + + return luc.driver.Move(luc.path, blobPath) +} + +// linkLayer links a valid, written layer blob into the registry under the +// named repository for the upload controller. +func (luc *layerUploadController) linkLayer(digest digest.Digest) error { + layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{ + name: luc.Name(), + digest: digest, + }) + + if err != nil { + return err + } + + return luc.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(digest)) +} + +// removeResources should clean up all resources associated with the upload +// instance. An error will be returned if the clean up cannot proceed. If the +// resources are already not present, no error will be returned. +func (luc *layerUploadController) removeResources() error { + dataPath, err := luc.layerStore.repository.registry.pm.path(uploadDataPathSpec{ + name: luc.Name(), + uuid: luc.uuid, + }) + + if err != nil { + return err + } + + // Resolve and delete the containing directory, which should include any + // upload related files. + dirPath := path.Dir(dataPath) + + if err := luc.driver.Delete(dirPath); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + break // already gone! + default: + // This should be uncommon enough such that returning an error + // should be okay. At this point, the upload should be mostly + // complete, but perhaps the backend became unaccessible. + logrus.Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go new file mode 100644 index 00000000..1f798dde --- /dev/null +++ b/docs/storage/manifeststore.go @@ -0,0 +1,190 @@ +package storage + +import ( + "fmt" + "strings" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +// ErrUnknownRepository is returned if the named repository is not known by +// the registry. +type ErrUnknownRepository struct { + Name string +} + +func (err ErrUnknownRepository) Error() string { + return fmt.Sprintf("unknown respository name=%s", err.Name) +} + +// ErrUnknownManifest is returned if the manifest is not known by the +// registry. +type ErrUnknownManifest struct { + Name string + Tag string +} + +func (err ErrUnknownManifest) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrUnknownManifestRevision is returned when a manifest cannot be found by +// revision within a repository. 
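+//
+// Construction sketch (illustrative only):
+//
+//	return nil, ErrUnknownManifestRevision{Name: repo.Name(), Revision: dgst}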
+type ErrUnknownManifestRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrUnknownManifestRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. +type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return fmt.Sprintf("unverified manifest") +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +type manifestStore struct { + repository *repository + + revisionStore *revisionStore + tagStore *tagStore +} + +var _ ManifestService = &manifestStore{} + +// func (ms *manifestStore) Repository() Repository { +// return ms.repository +// } + +func (ms *manifestStore) Tags() ([]string, error) { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") + return ms.tagStore.tags() +} + +func (ms *manifestStore) Exists(tag string) (bool, error) { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Exists") + return ms.tagStore.exists(tag) +} + +func (ms *manifestStore) Get(tag string) (*manifest.SignedManifest, error) { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Get") + dgst, err := ms.tagStore.resolve(tag) + if err != nil { + return nil, err + } + + return ms.revisionStore.get(dgst) +} + +func (ms *manifestStore) Put(tag string, manifest *manifest.SignedManifest) error { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Put") + + // TODO(stevvooe): Add check here to see if the revision is already + // present in the repository. If it is, we should merge the signatures, do + // a shallow verify (or a full one, doesn't matter) and return an error + // indicating what happened. + + // Verify the manifest. + if err := ms.verifyManifest(tag, manifest); err != nil { + return err + } + + // Store the revision of the manifest + revision, err := ms.revisionStore.put(manifest) + if err != nil { + return err + } + + // Now, tag the manifest + return ms.tagStore.tag(tag, revision) +} + +// Delete removes all revisions of the given tag. We may want to change these +// semantics in the future, but this will maintain consistency. The underlying +// blobs are left alone. +func (ms *manifestStore) Delete(tag string) error { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete") + + revisions, err := ms.tagStore.revisions(tag) + if err != nil { + return err + } + + for _, revision := range revisions { + if err := ms.revisionStore.delete(revision); err != nil { + return err + } + } + + return ms.tagStore.delete(tag) +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the name and tag match and +// that the signature is valid for the enclosed payload. As a policy, the +// registry only tries to store valid content, leaving trust policies of that +// content up to consumers. 
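+//
+// Checks run in order: name and tag agreement, libtrust signature
+// verification, and existence of every referenced FSLayer blob; failures
+// accumulate into ErrManifestVerification rather than short-circuiting.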
+func (ms *manifestStore) verifyManifest(tag string, mnfst *manifest.SignedManifest) error {
+ var errs ErrManifestVerification
+ if mnfst.Name != ms.repository.Name() {
+ // TODO(stevvooe): This needs to be an exported error
+ errs = append(errs, fmt.Errorf("repository name does not match manifest name"))
+ }
+
+ if mnfst.Tag != tag {
+ // TODO(stevvooe): This needs to be an exported error.
+ errs = append(errs, fmt.Errorf("tag does not match manifest tag"))
+ }
+
+ if _, err := manifest.Verify(mnfst); err != nil {
+ switch err {
+ case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent:
+ errs = append(errs, ErrManifestUnverified{})
+ default:
+ if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust
+ errs = append(errs, ErrManifestUnverified{})
+ } else {
+ errs = append(errs, err)
+ }
+ }
+ }
+
+ for _, fsLayer := range mnfst.FSLayers {
+ exists, err := ms.repository.Layers().Exists(fsLayer.BlobSum)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ if !exists {
+ errs = append(errs, ErrUnknownLayer{FSLayer: fsLayer})
+ }
+ }
+
+ if len(errs) != 0 {
+ // TODO(stevvooe): These need to be recoverable by a caller.
+ return errs
+ }
+
+ return nil
+}
diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go
new file mode 100644
index 00000000..8284ce94
--- /dev/null
+++ b/docs/storage/manifeststore_test.go
@@ -0,0 +1,233 @@
+package storage
+
+import (
+ "bytes"
+ "io"
+ "reflect"
+ "testing"
+
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/manifest"
+ "github.com/docker/distribution/storagedriver/inmemory"
+ "github.com/docker/distribution/testutil"
+ "github.com/docker/libtrust"
+ "golang.org/x/net/context"
+)
+
+func TestManifestStorage(t *testing.T) {
+ ctx := context.Background()
+ name := "foo/bar"
+ tag := "thetag"
+ driver := inmemory.New()
+ registry := NewRegistryWithDriver(driver)
+ repo := registry.Repository(ctx, name)
+ ms := repo.Manifests()
+
+ exists, err := ms.Exists(tag)
+ if err != nil {
+ t.Fatalf("unexpected error checking manifest existence: %v", err)
+ }
+
+ if exists {
+ t.Fatalf("manifest should not exist")
+ }
+
+ if _, err := ms.Get(tag); err == nil {
+ t.Fatalf("expected error fetching manifest for unknown tag")
+ } else if _, ok := err.(ErrUnknownManifest); !ok {
+ t.Fatalf("expected manifest unknown error: %#v", err)
+ }
+
+ m := manifest.Manifest{
+ Versioned: manifest.Versioned{
+ SchemaVersion: 1,
+ },
+ Name: name,
+ Tag: tag,
+ }
+
+ // Build up some test layers and add them to the manifest, saving the
+ // readseekers for upload later.
+ testLayers := map[digest.Digest]io.ReadSeeker{}
+ for i := 0; i < 2; i++ {
+ rs, ds, err := testutil.CreateRandomTarFile()
+ if err != nil {
+ t.Fatalf("unexpected error generating test layer file")
+ }
+ dgst := digest.Digest(ds)
+
+ testLayers[dgst] = rs
+ m.FSLayers = append(m.FSLayers, manifest.FSLayer{
+ BlobSum: dgst,
+ })
+ }
+
+ pk, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("unexpected error generating private key: %v", err)
+ }
+
+ sm, err := manifest.Sign(&m, pk)
+ if err != nil {
+ t.Fatalf("error signing manifest: %v", err)
+ }
+
+ err = ms.Put(tag, sm)
+ if err == nil {
+ t.Fatalf("expected errors putting manifest")
+ }
+
+ // TODO(stevvooe): We expect errors describing all of the missing layers.
+
+ // Now, upload the layers that were missing!
+ for dgst, rs := range testLayers { + upload, err := repo.Layers().Upload() + if err != nil { + t.Fatalf("unexpected error creating test upload: %v", err) + } + + if _, err := io.Copy(upload, rs); err != nil { + t.Fatalf("unexpected error copying to upload: %v", err) + } + + if _, err := upload.Finish(dgst); err != nil { + t.Fatalf("unexpected error finishing upload: %v", err) + } + } + + if err = ms.Put(tag, sm); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + exists, err = ms.Exists(tag) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %v", err) + } + + if !exists { + t.Fatalf("manifest should exist") + } + + fetchedManifest, err := ms.Get(tag) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + if !reflect.DeepEqual(fetchedManifest, sm) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + payload, err := fetchedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting payload: %v", err) + } + + sigs, err := fetchedJWS.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) + } + + // Grabs the tags and check that this tagged manifest is present + tags, err := ms.Tags() + if err != nil { + t.Fatalf("unexpected error fetching tags: %v", err) + } + + if len(tags) != 1 { + t.Fatalf("unexpected tags returned: %v", tags) + } + + if tags[0] != tag { + t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{tag}) + } + + // Now, push the same manifest with a different key + pk2, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm2, err := manifest.Sign(&m, pk2) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures") + if err != nil { + t.Fatalf("error parsing signature: %v", err) + } + + sigs2, err := jws2.Signatures() + if err != nil { + t.Fatalf("unable to extract signatures: %v", err) + } + + if len(sigs2) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) + } + + if err = ms.Put(tag, sm2); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + fetched, err := ms.Get(tag) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + if _, err := manifest.Verify(fetched); err != nil { + t.Fatalf("unexpected error verifying manifest: %v", err) + } + + // Assemble our payload and two signatures to get what we expect! 
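+ // Since both manifests sign the same payload, the revision store should
+ // have accumulated the signatures from both keys under a single revision,
+ // so the manifest fetched above must carry both. The steps below rebuild
+ // that expectation by merging the two known signatures over the payload.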
+ expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0]) + if err != nil { + t.Fatalf("unexpected error merging jws: %v", err) + } + + expectedSigs, err := expectedJWS.Signatures() + if err != nil { + t.Fatalf("unexpected error getting expected signatures: %v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures") + if err != nil { + t.Fatalf("unexpected error parsing jws: %v", err) + } + + receivedPayload, err := receivedJWS.Payload() + if err != nil { + t.Fatalf("unexpected error extracting received payload: %v", err) + } + + if !bytes.Equal(receivedPayload, payload) { + t.Fatalf("payloads are not equal") + } + + receivedSigs, err := receivedJWS.Signatures() + if err != nil { + t.Fatalf("error getting signatures: %v", err) + } + + for i, sig := range receivedSigs { + if !bytes.Equal(sig, expectedSigs[i]) { + t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) + } + } + + if err := ms.Delete(tag); err != nil { + t.Fatalf("unexpected error deleting manifest: %v", err) + } +} diff --git a/docs/storage/notifications/bridge.go b/docs/storage/notifications/bridge.go new file mode 100644 index 00000000..217ee5bd --- /dev/null +++ b/docs/storage/notifications/bridge.go @@ -0,0 +1,156 @@ +package notifications + +import ( + "net/http" + "time" + + "github.com/docker/distribution/manifest" + + "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage" +) + +type bridge struct { + ub URLBuilder + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink +} + +var _ Listener = &bridge{} + +// URLBuilder defines a subset of url builder to be used by the event listener. +type URLBuilder interface { + BuildManifestURL(name, tag string) (string, error) + BuildBlobURL(name string, dgst digest.Digest) (string, error) +} + +// NewBridge returns a notification listener that writes records to sink, +// using the actor and source. Any urls populated in the events created by +// this bridge will be created using the URLBuilder. +// TODO(stevvooe): Update this to simply take a context.Context object. +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { + return &bridge{ + ub: ub, + actor: actor, + source: source, + request: request, + sink: sink, + } +} + +// NewRequestRecord builds a RequestRecord for use in NewBridge from an +// http.Request, associating it with a request id. 
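+//
+// For example (sketch; ub, source, actor and sink are assumed to be built
+// during application setup):
+//
+// record := NewRequestRecord(uuid.New(), r)
+// listener := NewBridge(ub, source, actor, record, sink)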
+func NewRequestRecord(id string, r *http.Request) RequestRecord { + return RequestRecord{ + ID: id, + Addr: r.RemoteAddr, + Host: r.Host, + Method: r.Method, + UserAgent: r.UserAgent(), + } +} + +func (b *bridge) ManifestPushed(repo storage.Repository, sm *manifest.SignedManifest) error { + return b.createManifestEventAndWrite(EventActionPush, repo, sm) +} + +func (b *bridge) ManifestPulled(repo storage.Repository, sm *manifest.SignedManifest) error { + return b.createManifestEventAndWrite(EventActionPull, repo, sm) +} + +func (b *bridge) ManifestDeleted(repo storage.Repository, sm *manifest.SignedManifest) error { + return b.createManifestEventAndWrite(EventActionDelete, repo, sm) +} + +func (b *bridge) LayerPushed(repo storage.Repository, layer storage.Layer) error { + return b.createLayerEventAndWrite(EventActionPush, repo, layer.Digest()) +} + +func (b *bridge) LayerPulled(repo storage.Repository, layer storage.Layer) error { + return b.createLayerEventAndWrite(EventActionPull, repo, layer.Digest()) +} + +func (b *bridge) LayerDeleted(repo storage.Repository, layer storage.Layer) error { + return b.createLayerEventAndWrite(EventActionDelete, repo, layer.Digest()) +} + +func (b *bridge) createManifestEventAndWrite(action string, repo storage.Repository, sm *manifest.SignedManifest) error { + event, err := b.createManifestEvent(action, repo, sm) + if err != nil { + return err + } + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEvent(action string, repo storage.Repository, sm *manifest.SignedManifest) (*Event, error) { + event := b.createEvent(action) + event.Target.Type = EventTargetTypeManifest + event.Target.Name = repo.Name() + event.Target.Tag = sm.Tag + + p, err := sm.Payload() + if err != nil { + return nil, err + } + + event.Target.Digest, err = digest.FromBytes(p) + if err != nil { + return nil, err + } + + // TODO(stevvooe): Currently, the is the "tag" url: once the digest url is + // implemented, this should be replaced. + event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, sm.Tag) + if err != nil { + return nil, err + } + + return event, nil +} + +func (b *bridge) createLayerEventAndWrite(action string, repo storage.Repository, dgst digest.Digest) error { + event, err := b.createLayerEvent(action, repo, dgst) + if err != nil { + return err + } + + return b.sink.Write(*event) +} + +func (b *bridge) createLayerEvent(action string, repo storage.Repository, dgst digest.Digest) (*Event, error) { + event := b.createEvent(action) + event.Target.Type = EventTargetTypeBlob + event.Target.Name = repo.Name() + event.Target.Digest = dgst + + var err error + event.Target.URL, err = b.ub.BuildBlobURL(repo.Name(), dgst) + if err != nil { + return nil, err + } + + return event, nil +} + +// createEvent creates an event with actor and source populated. +func (b *bridge) createEvent(action string) *Event { + event := createEvent(action) + event.Source = b.source + event.Actor = b.actor + event.Request = b.request + + return event +} + +// createEvent returns a new event, timestamped, with the specified action. 
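+//
+// It is deliberately free of bridge state, which lets tests build events
+// directly (see createTestEvent in http_test.go) without constructing a
+// bridge.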
+func createEvent(action string) *Event { + return &Event{ + ID: uuid.New(), + Timestamp: time.Now(), + Action: action, + } +} diff --git a/docs/storage/notifications/endpoint.go b/docs/storage/notifications/endpoint.go new file mode 100644 index 00000000..dfdb111c --- /dev/null +++ b/docs/storage/notifications/endpoint.go @@ -0,0 +1,86 @@ +package notifications + +import ( + "net/http" + "time" +) + +// EndpointConfig covers the optional configuration parameters for an active +// endpoint. +type EndpointConfig struct { + Headers http.Header + Timeout time.Duration + Threshold int + Backoff time.Duration +} + +// defaults set any zero-valued fields to a reasonable default. +func (ec *EndpointConfig) defaults() { + if ec.Timeout <= 0 { + ec.Timeout = time.Second + } + + if ec.Threshold <= 0 { + ec.Threshold = 10 + } + + if ec.Backoff <= 0 { + ec.Backoff = time.Second + } +} + +// Endpoint is a reliable, queued, thread-safe sink that notify external http +// services when events are written. Writes are non-blocking and always +// succeed for callers but events may be queued internally. +type Endpoint struct { + Sink + url string + name string + + EndpointConfig + + metrics *safeMetrics +} + +// NewEndpoint returns a running endpoint, ready to receive events. +func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { + var endpoint Endpoint + endpoint.name = name + endpoint.url = url + endpoint.EndpointConfig = config + endpoint.defaults() + endpoint.metrics = newSafeMetrics() + + // Configures the inmemory queue, retry, http pipeline. + endpoint.Sink = newHTTPSink( + endpoint.url, endpoint.Timeout, endpoint.Headers, + endpoint.metrics.httpStatusListener()) + endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) + endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) + + register(&endpoint) + return &endpoint +} + +// Name returns the name of the endpoint, generally used for debugging. +func (e *Endpoint) Name() string { + return e.name +} + +// URL returns the url of the endpoint. +func (e *Endpoint) URL() string { + return e.url +} + +// ReadMetrics populates em with metrics from the endpoint. +func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { + e.metrics.Lock() + defer e.metrics.Unlock() + + *em = e.metrics.EndpointMetrics + // Map still need to copied in a threadsafe manner. + em.Statuses = make(map[string]int) + for k, v := range e.metrics.Statuses { + em.Statuses[k] = v + } +} diff --git a/docs/storage/notifications/event.go b/docs/storage/notifications/event.go new file mode 100644 index 00000000..c23766fa --- /dev/null +++ b/docs/storage/notifications/event.go @@ -0,0 +1,154 @@ +package notifications + +import ( + "fmt" + "time" + + "github.com/docker/distribution/digest" +) + +// EventAction constants used in action field of Event. +const ( + EventActionPull = "pull" + EventActionPush = "push" + EventActionDelete = "delete" +) + +// EventTargetType constants used in Target section of Event. +const ( + EventTargetTypeManifest = "manifest" + EventTargetTypeBlob = "blob" +) + +// EventsMediaType is the mediatype for the json event envelope. If the Event, +// ActorRecord, SourceRecord or Envelope structs change, the version number +// should be incremented. +const EventsMediaType = "application/vnd.docker.distribution.events.v1+json" + +// Envelope defines the fields of a json event envelope message that can hold +// one or more events. +type Envelope struct { + // Events make up the contents of the envelope. 
Events present in a single + // envelope are not necessarily related. + Events []Event `json:"events,omitempty"` +} + +// TODO(stevvooe): The event type should be separate from the json format. It +// should be defined as an interface. Leaving as is for now since we don't +// need that at this time. If we make this change, the struct below would be +// called "EventRecord". + +// Event provides the fields required to describe a registry event. +type Event struct { + // ID provides a unique identifier for the event. + ID string `json:"id,omitempty"` + + // Timestamp is the time at which the event occurred. + Timestamp time.Time `json:"timestamp,omitempty"` + + // Action indicates what action encompasses the provided event. + Action string `json:"action,omitempty"` + + // Target uniquely describes the target of the event. + Target struct { + // Type should be "manifest" or "blob" + Type string `json:"type,omitempty"` + + // Name identifies the named repository. + Name string `json:"name,omitempty"` + + // Digest should identify the object in the repository. + Digest digest.Digest `json:"digest,omitempty"` + + // Tag is present if the operation involved a tagged manifest. + Tag string `json:"tag,omitempty"` + + // URL provides a link to the content on the relevant repository instance. + URL string `json:"url,omitempty"` + } `json:"target,omitempty"` + + // Request covers the request that generated the event. + Request RequestRecord `json:"request,omitempty"` + + // Actor specifies the agent that initiated the event. For most + // situations, this could be from the authorizaton context of the request. + Actor ActorRecord `json:"actor,omitempty"` + + // Source identifies the registry node that generated the event. Put + // differently, while the actor "initiates" the event, the source + // "generates" it. + Source SourceRecord `json:"source,omitempty"` +} + +// ActorRecord specifies the agent that initiated the event. For most +// situations, this could be from the authorizaton context of the request. +// Data in this record can refer to both the initiating client and the +// generating request. +type ActorRecord struct { + // Name corresponds to the subject or username associated with the + // request context that generated the event. + Name string `json:"name,omitempty"` + + // TODO(stevvooe): Look into setting a session cookie to get this + // without docker daemon. + // SessionID + + // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and + // get the actual command. + // Command +} + +// RequestRecord covers the request that generated the event. +type RequestRecord struct { + // ID uniquely identifies the request that initiated the event. + ID string `json:"id"` + + // Addr contains the ip or hostname and possibly port of the client + // connection that initiated the event. This is the RemoteAddr from + // the standard http request. + Addr string `json:"addr,omitempty"` + + // Host is the externally accessible host name of the registry instance, + // as specified by the http host header on incoming requests. + Host string `json:"host,omitempty"` + + // Method has the request method that generated the event. + Method string `json:"method"` + + // UserAgent contains the user agent header of the request. + UserAgent string `json:"useragent"` +} + +// SourceRecord identifies the registry node that generated the event. Put +// differently, while the actor "initiates" the event, the source "generates" +// it. 
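+//
+// A typical population (sketch; the port and use of uuid are illustrative):
+//
+// hostname, _ := os.Hostname()
+// source := SourceRecord{
+// Addr: hostname + ":5000",
+// InstanceID: uuid.New(),
+// }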
+type SourceRecord struct { + // Addr contains the ip or hostname and the port of the registry node + // that generated the event. Generally, this will be resolved by + // os.Hostname() along with the running port. + Addr string `json:"addr,omitempty"` + + // InstanceID identifies a running instance of an application. Changes + // after each restart. + InstanceID string `json:"instanceID,omitempty"` +} + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("sink: closed") +) + +// Sink accepts and sends events. +type Sink interface { + // Write writes one or more events to the sink. If no error is returned, + // the caller will assume that all events have been committed and will not + // try to send them again. If an error is received, the caller may retry + // sending the event. The caller should cede the slice of memory to the + // sink and not modify it after calling this method. + Write(events ...Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/docs/storage/notifications/event_test.go b/docs/storage/notifications/event_test.go new file mode 100644 index 00000000..cc2180ac --- /dev/null +++ b/docs/storage/notifications/event_test.go @@ -0,0 +1,145 @@ +package notifications + +import ( + "encoding/json" + "strings" + "testing" + "time" +) + +// TestEventJSONFormat provides silly test to detect if the event format or +// envelope has changed. If this code fails, the revision of the protocol may +// need to be incremented. +func TestEventEnvelopeJSONFormat(t *testing.T) { + var expected = strings.TrimSpace(` +{ + "events": [ + { + "id": "asdf-asdf-asdf-asdf-0", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "type": "manifest", + "name": "library/test", + "digest": "sha256:0123456789abcdef0", + "tag": "latest", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-1", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "type": "blob", + "name": "library/test", + "digest": "tarsum.v2+sha256:0123456789abcdef1", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + }, + { + "id": "asdf-asdf-asdf-asdf-2", + "timestamp": "2006-01-02T15:04:05Z", + "action": "push", + "target": { + "type": "blob", + "name": "library/test", + "digest": "tarsum.v2+sha256:0123456789abcdef2", + "url": "http://example.com/v2/library/test/manifests/latest" + }, + "request": { + "id": "asdfasdf", + "addr": "client.local", + "host": "registrycluster.local", + "method": "PUT", + "useragent": "test/0.1" + }, + "actor": { + "name": "test-actor" + }, + "source": { + "addr": "hostname.local:port" + } + } + ] +} + `) + + tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) + if err != nil { + t.Fatalf("error creating time: %v", err) + } + + var prototype Event + prototype.Action = "push" + 
prototype.Timestamp = tm + prototype.Actor.Name = "test-actor" + prototype.Request.ID = "asdfasdf" + prototype.Request.Addr = "client.local" + prototype.Request.Host = "registrycluster.local" + prototype.Request.Method = "PUT" + prototype.Request.UserAgent = "test/0.1" + prototype.Source.Addr = "hostname.local:port" + + var manifestPush Event + manifestPush = prototype + manifestPush.ID = "asdf-asdf-asdf-asdf-0" + manifestPush.Target.Digest = "sha256:0123456789abcdef0" + manifestPush.Target.Type = EventTargetTypeManifest + manifestPush.Target.Name = "library/test" + manifestPush.Target.Tag = "latest" + manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush0 Event + layerPush0 = prototype + layerPush0.ID = "asdf-asdf-asdf-asdf-1" + layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1" + layerPush0.Target.Type = EventTargetTypeBlob + layerPush0.Target.Name = "library/test" + layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var layerPush1 Event + layerPush1 = prototype + layerPush1.ID = "asdf-asdf-asdf-asdf-2" + layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2" + layerPush1.Target.Type = EventTargetTypeBlob + layerPush1.Target.Name = "library/test" + layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" + + var envelope Envelope + envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling envelope: %v", err) + } + if string(p) != expected { + t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) + } +} diff --git a/docs/storage/notifications/http.go b/docs/storage/notifications/http.go new file mode 100644 index 00000000..15b3574c --- /dev/null +++ b/docs/storage/notifications/http.go @@ -0,0 +1,145 @@ +package notifications + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" +) + +// httpSink implements a single-flight, http notification endpoint. This is +// very lightweight in that it only makes an attempt at an http request. +// Reliability should be provided by the caller. +type httpSink struct { + url string + + mu sync.Mutex + closed bool + client *http.Client + listeners []httpStatusListener + + // TODO(stevvooe): Allow one to configure the media type accepted by this + // sink and choose the serialization based on that. +} + +// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other +// sinks for increased reliability. +func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { + return &httpSink{ + url: u, + listeners: listeners, + client: &http.Client{ + Transport: &headerRoundTripper{ + Transport: http.DefaultTransport.(*http.Transport), + headers: headers, + }, + Timeout: timeout, + }, + } +} + +// httpStatusListener is called on various outcomes of sending notifications. +type httpStatusListener interface { + success(status int, events ...Event) + failure(status int, events ...Event) + err(err error, events ...Event) +} + +// Accept makes an attempt to notify the endpoint, returning an error if it +// fails. It is the caller's responsibility to retry on error. The events are +// accepted or rejected as a group. 
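+//
+// Because reliability is left to the caller, this sink is normally wrapped
+// rather than used bare; a composition mirroring NewEndpoint would be:
+//
+// var s Sink = newHTTPSink(url, timeout, headers)
+// s = newRetryingSink(s, threshold, backoff)
+// s = newEventQueue(s)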
+func (hs *httpSink) Write(events ...Event) error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return ErrSinkClosed + } + + envelope := Envelope{ + Events: events, + } + + // TODO(stevvooe): It is not ideal to keep re-encoding the request body on + // retry but we are going to do it to keep the code simple. It is likely + // we could change the event struct to manage its own buffer. + + p, err := json.MarshalIndent(envelope, "", " ") + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) + } + + body := bytes.NewReader(p) + resp, err := hs.client.Post(hs.url, EventsMediaType, body) + if err != nil { + for _, listener := range hs.listeners { + listener.err(err, events...) + } + + return fmt.Errorf("%v: error posting: %v", hs, err) + } + + // The notifier will treat any 2xx or 3xx response as accepted by the + // endpoint. + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + for _, listener := range hs.listeners { + listener.success(resp.StatusCode, events...) + } + + // TODO(stevvooe): This is a little accepting: we may want to support + // unsupported media type responses with retries using the correct + // media type. There may also be cases that will never work. + + return nil + default: + for _, listener := range hs.listeners { + listener.failure(resp.StatusCode, events...) + } + return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) + } +} + +// Close the endpoint +func (hs *httpSink) Close() error { + hs.mu.Lock() + defer hs.mu.Unlock() + + if hs.closed { + return fmt.Errorf("httpsink: already closed") + } + + hs.closed = true + return nil +} + +func (hs *httpSink) String() string { + return fmt.Sprintf("httpSink{%s}", hs.url) +} + +type headerRoundTripper struct { + *http.Transport // must be transport to support CancelRequest + headers http.Header +} + +func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var nreq http.Request + nreq = *req + nreq.Header = make(http.Header) + + merge := func(headers http.Header) { + for k, v := range headers { + nreq.Header[k] = append(nreq.Header[k], v...) + } + } + + merge(req.Header) + merge(hrt.headers) + + return hrt.Transport.RoundTrip(&nreq) +} diff --git a/docs/storage/notifications/http_test.go b/docs/storage/notifications/http_test.go new file mode 100644 index 00000000..c2cfbc02 --- /dev/null +++ b/docs/storage/notifications/http_test.go @@ -0,0 +1,155 @@ +package notifications + +import ( + "encoding/json" + "fmt" + "mime" + "net/http" + "net/http/httptest" + "reflect" + "strconv" + "testing" +) + +// TestHTTPSink mocks out an http endpoint and notifies it under a couple of +// conditions, ensuring correct behavior. 
+func TestHTTPSink(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + if r.Method != "POST" { + w.WriteHeader(http.StatusMethodNotAllowed) + t.Fatalf("unexpected request method: %v", r.Method) + return + } + + // Extract the content type and make sure it matches + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) + return + } + + if mediaType != EventsMediaType { + w.WriteHeader(http.StatusUnsupportedMediaType) + t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) + return + } + + var envelope Envelope + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&envelope); err != nil { + w.WriteHeader(http.StatusBadRequest) + t.Fatalf("error decoding request body: %v", err) + return + } + + // Let caller choose the status + status, err := strconv.Atoi(r.FormValue("status")) + if err != nil { + t.Logf("error parsing status: %v", err) + + // May just be empty, set status to 200 + status = http.StatusOK + } + + w.WriteHeader(status) + })) + + metrics := newSafeMetrics() + sink := newHTTPSink(server.URL, 0, nil, + &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) + + var expectedMetrics EndpointMetrics + expectedMetrics.Statuses = make(map[string]int) + + for _, tc := range []struct { + events []Event // events to send + url string + failure bool // true if there should be a failure. + statusCode int // if not set, no status code should be incremented. + }{ + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", "manifest")}, + }, + { + statusCode: http.StatusOK, + events: []Event{ + createTestEvent("push", "library/test", "manifest"), + createTestEvent("push", "library/test", "layer"), + createTestEvent("push", "library/test", "layer"), + }, + }, + { + statusCode: http.StatusTemporaryRedirect, + }, + { + statusCode: http.StatusBadRequest, + failure: true, + }, + { + // Case where connection never goes through. + url: "http://shoudlntresolve/", + failure: true, + }, + } { + + if tc.failure { + expectedMetrics.Failures += len(tc.events) + } else { + expectedMetrics.Successes += len(tc.events) + } + + if tc.statusCode > 0 { + expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) + } + + url := tc.url + if url == "" { + url = server.URL + "/" + } + // setup endpoint to respond with expected status code. + url += fmt.Sprintf("?status=%v", tc.statusCode) + sink.url = url + + t.Logf("testcase: %v, fail=%v", url, tc.failure) + // Try a simple event emission. + err := sink.Write(tc.events...) 
+ + if !tc.failure { + if err != nil { + t.Fatalf("unexpected error send event: %v", err) + } + } else { + if err == nil { + t.Fatalf("the endpoint should have rejected the request") + } + } + + if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { + t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) + } + } + + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing http sink: %v", err) + } + + // double close returns error + if err := sink.Close(); err == nil { + t.Fatalf("second close should have returned error: %v", err) + } + +} + +func createTestEvent(action, repo, typ string) Event { + event := createEvent(action) + + event.Target.Type = typ + event.Target.Name = repo + + return *event +} diff --git a/docs/storage/notifications/listener.go b/docs/storage/notifications/listener.go new file mode 100644 index 00000000..99a06f02 --- /dev/null +++ b/docs/storage/notifications/listener.go @@ -0,0 +1,140 @@ +package notifications + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage" +) + +// ManifestListener describes a set of methods for listening to events related to manifests. +type ManifestListener interface { + ManifestPushed(repo storage.Repository, sm *manifest.SignedManifest) error + ManifestPulled(repo storage.Repository, sm *manifest.SignedManifest) error + + // TODO(stevvooe): Please note that delete support is still a little shaky + // and we'll need to propagate these in the future. + + ManifestDeleted(repo storage.Repository, sm *manifest.SignedManifest) error +} + +// LayerListener describes a listener that can respond to layer related events. +type LayerListener interface { + LayerPushed(repo storage.Repository, layer storage.Layer) error + LayerPulled(repo storage.Repository, layer storage.Layer) error + + // TODO(stevvooe): Please note that delete support is still a little shaky + // and we'll need to propagate these in the future. + + LayerDeleted(repo storage.Repository, layer storage.Layer) error +} + +// Listener combines all repository events into a single interface. +type Listener interface { + ManifestListener + LayerListener +} + +type repositoryListener struct { + storage.Repository + listener Listener +} + +// Listen dispatches events on the repository to the listener. 
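+//
+// Typical usage (as in listener_test.go) decorates a repository up front:
+//
+// repo := Listen(registry.Repository(ctx, "foo/bar"), listener)
+// // manifest and layer operations on repo now notify listener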
+func Listen(repo storage.Repository, listener Listener) storage.Repository { + return &repositoryListener{ + Repository: repo, + listener: listener, + } +} + +func (rl *repositoryListener) Manifests() storage.ManifestService { + return &manifestServiceListener{ + ManifestService: rl.Repository.Manifests(), + parent: rl, + } +} + +func (rl *repositoryListener) Layers() storage.LayerService { + return &layerServiceListener{ + LayerService: rl.Repository.Layers(), + parent: rl, + } +} + +type manifestServiceListener struct { + storage.ManifestService + parent *repositoryListener +} + +func (msl *manifestServiceListener) Get(tag string) (*manifest.SignedManifest, error) { + sm, err := msl.ManifestService.Get(tag) + if err == nil { + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository, sm); err != nil { + logrus.Errorf("error dispatching manifest pull to listener: %v", err) + } + } + + return sm, err +} + +func (msl *manifestServiceListener) Put(tag string, sm *manifest.SignedManifest) error { + err := msl.ManifestService.Put(tag, sm) + + if err == nil { + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository, sm); err != nil { + logrus.Errorf("error dispatching manifest push to listener: %v", err) + } + } + + return err +} + +type layerServiceListener struct { + storage.LayerService + parent *repositoryListener +} + +func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (storage.Layer, error) { + layer, err := lsl.LayerService.Fetch(dgst) + if err == nil { + if err := lsl.parent.listener.LayerPulled(lsl.parent.Repository, layer); err != nil { + logrus.Errorf("error dispatching layer pull to listener: %v", err) + } + } + + return layer, err +} + +func (lsl *layerServiceListener) Upload() (storage.LayerUpload, error) { + lu, err := lsl.LayerService.Upload() + return lsl.decorateUpload(lu), err +} + +func (lsl *layerServiceListener) Resume(uuid string) (storage.LayerUpload, error) { + lu, err := lsl.LayerService.Resume(uuid) + return lsl.decorateUpload(lu), err +} + +func (lsl *layerServiceListener) decorateUpload(lu storage.LayerUpload) storage.LayerUpload { + return &layerUploadListener{ + LayerUpload: lu, + parent: lsl, + } +} + +type layerUploadListener struct { + storage.LayerUpload + parent *layerServiceListener +} + +func (lul *layerUploadListener) Finish(dgst digest.Digest) (storage.Layer, error) { + layer, err := lul.LayerUpload.Finish(dgst) + if err == nil { + if err := lul.parent.parent.listener.LayerPushed(lul.parent.parent.Repository, layer); err != nil { + logrus.Errorf("error dispatching layer push to listener: %v", err) + } + } + + return layer, err +} diff --git a/docs/storage/notifications/listener_test.go b/docs/storage/notifications/listener_test.go new file mode 100644 index 00000000..b62e7e7e --- /dev/null +++ b/docs/storage/notifications/listener_test.go @@ -0,0 +1,153 @@ +package notifications + +import ( + "io" + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +func TestListener(t *testing.T) { + registry := storage.NewRegistryWithDriver(inmemory.New()) + tl := &testListener{ + ops: make(map[string]int), + } + ctx := context.Background() + repository := Listen(registry.Repository(ctx, "foo/bar"), tl) + + // Now take the registry through a number of operations + 
checkExerciseRepository(t, repository) + + expectedOps := map[string]int{ + "manifest:push": 1, + "manifest:pull": 1, + // "manifest:delete": 0, // deletes not supported for now + "layer:push": 2, + "layer:pull": 2, + // "layer:delete": 0, // deletes not supported for now + } + + if !reflect.DeepEqual(tl.ops, expectedOps) { + t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) + } + +} + +type testListener struct { + ops map[string]int +} + +func (tl *testListener) ManifestPushed(repo storage.Repository, sm *manifest.SignedManifest) error { + tl.ops["manifest:push"]++ + + return nil +} + +func (tl *testListener) ManifestPulled(repo storage.Repository, sm *manifest.SignedManifest) error { + tl.ops["manifest:pull"]++ + return nil +} + +func (tl *testListener) ManifestDeleted(repo storage.Repository, sm *manifest.SignedManifest) error { + tl.ops["manifest:delete"]++ + return nil +} + +func (tl *testListener) LayerPushed(repo storage.Repository, layer storage.Layer) error { + tl.ops["layer:push"]++ + return nil +} + +func (tl *testListener) LayerPulled(repo storage.Repository, layer storage.Layer) error { + tl.ops["layer:pull"]++ + return nil +} + +func (tl *testListener) LayerDeleted(repo storage.Repository, layer storage.Layer) error { + tl.ops["layer:delete"]++ + return nil +} + +// checkExerciseRegistry takes the registry through all of its operations, +// carrying out generic checks. +func checkExerciseRepository(t *testing.T, repository storage.Repository) { + // TODO(stevvooe): This would be a nice testutil function. Basically, it + // takes the registry through a common set of operations. This could be + // used to make cross-cutting updates by changing internals that affect + // update counts. Basically, it would make writing tests a lot easier. + + tag := "thetag" + m := manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: repository.Name(), + Tag: tag, + } + + layers := repository.Layers() + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating test layer: %v", err) + } + dgst := digest.Digest(ds) + upload, err := layers.Upload() + if err != nil { + t.Fatalf("error creating layer upload: %v", err) + } + + // Use the resumes, as well! 
+ upload, err = layers.Resume(upload.UUID())
+ if err != nil {
+ t.Fatalf("error resuming layer upload: %v", err)
+ }
+
+ if _, err := io.Copy(upload, rs); err != nil {
+ t.Fatalf("unexpected error copying to upload: %v", err)
+ }
+
+ if _, err := upload.Finish(dgst); err != nil {
+ t.Fatalf("unexpected error finishing upload: %v", err)
+ }
+
+ m.FSLayers = append(m.FSLayers, manifest.FSLayer{
+ BlobSum: dgst,
+ })
+
+ // Then fetch the layers
+ if _, err := layers.Fetch(dgst); err != nil {
+ t.Fatalf("error fetching layer: %v", err)
+ }
+ }
+
+ pk, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ t.Fatalf("unexpected error generating key: %v", err)
+ }
+
+ sm, err := manifest.Sign(&m, pk)
+ if err != nil {
+ t.Fatalf("unexpected error signing manifest: %v", err)
+ }
+
+ manifests := repository.Manifests()
+
+ if err := manifests.Put(tag, sm); err != nil {
+ t.Fatalf("unexpected error putting the manifest: %v", err)
+ }
+
+ fetched, err := manifests.Get(tag)
+ if err != nil {
+ t.Fatalf("unexpected error fetching manifest: %v", err)
+ }
+
+ if fetched.Tag != tag {
+ t.Fatalf("retrieved unexpected manifest: %v", fetched)
+ }
+}
diff --git a/docs/storage/notifications/metrics.go b/docs/storage/notifications/metrics.go
new file mode 100644
index 00000000..2a8ffcbd
--- /dev/null
+++ b/docs/storage/notifications/metrics.go
@@ -0,0 +1,152 @@
+package notifications
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "sync"
+)
+
+// EndpointMetrics track various actions taken by the endpoint, typically by
+// number of events. The goal of this is to export it via expvar, but we may
+// find some other future solution to be better.
+type EndpointMetrics struct {
+ Pending int // events pending in queue
+ Events int // total events incoming
+ Successes int // total events written successfully
+ Failures int // total events failed
+ Errors int // total events errored
+ Statuses map[string]int // status code histogram, per call event
+}
+
+// safeMetrics guards the metrics implementation with a lock and provides a
+// safe update function.
+type safeMetrics struct {
+ EndpointMetrics
+ sync.Mutex // protects statuses map
+}
+
+// newSafeMetrics returns safeMetrics with map allocated.
+func newSafeMetrics() *safeMetrics {
+ var sm safeMetrics
+ sm.Statuses = make(map[string]int)
+ return &sm
+}
+
+// httpStatusListener returns the listener for the http sink that updates the
+// relevant counters.
+func (sm *safeMetrics) httpStatusListener() httpStatusListener {
+ return &endpointMetricsHTTPStatusListener{
+ safeMetrics: sm,
+ }
+}
+
+// eventQueueListener returns a listener that maintains queue related counters.
+func (sm *safeMetrics) eventQueueListener() eventQueueListener {
+ return &endpointMetricsEventQueueListener{
+ safeMetrics: sm,
+ }
+}
+
+// endpointMetricsHTTPStatusListener increments counters related to http sinks
+// for the relevant events.
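+//
+// These counters ultimately surface through the expvar map registered in
+// init() below, so they can be inspected via expvar's standard /debug/vars
+// endpoint when its handler is installed.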
+type endpointMetricsHTTPStatusListener struct { + *safeMetrics +} + +var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} + +func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Successes += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) + emsl.Failures += len(events) +} + +func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { + emsl.safeMetrics.Lock() + defer emsl.safeMetrics.Unlock() + emsl.Errors += len(events) +} + +// endpointMetricsEventQueueListener maintains the incoming events counter and +// the queues pending count. +type endpointMetricsEventQueueListener struct { + *safeMetrics +} + +func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Events += len(events) + eqc.Pending += len(events) +} + +func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { + eqc.Lock() + defer eqc.Unlock() + eqc.Pending -= len(events) +} + +// endpoints is global registry of endpoints used to report metrics to expvar +var endpoints struct { + registered []*Endpoint + mu sync.Mutex +} + +// register places the endpoint into expvar so that stats are tracked. +func register(e *Endpoint) { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + endpoints.registered = append(endpoints.registered, e) +} + +func init() { + // NOTE(stevvooe): Setup registry metrics structure to report to expvar. + // Ideally, we do more metrics through logging but we need some nice + // realtime metrics for queue state for now. + + registry := expvar.Get("registry") + + if registry == nil { + registry = expvar.NewMap("registry") + } + + var notifications expvar.Map + notifications.Init() + notifications.Set("endpoints", expvar.Func(func() interface{} { + endpoints.mu.Lock() + defer endpoints.mu.Unlock() + + var names []interface{} + for _, v := range endpoints.registered { + var epjson struct { + Name string `json:"name"` + URL string `json:"url"` + EndpointConfig + + Metrics EndpointMetrics + } + + epjson.Name = v.Name() + epjson.URL = v.URL() + epjson.EndpointConfig = v.EndpointConfig + + v.ReadMetrics(&epjson.Metrics) + + names = append(names, epjson) + } + + return names + })) + + registry.(*expvar.Map).Set("notifications", ¬ifications) +} diff --git a/docs/storage/notifications/sinks.go b/docs/storage/notifications/sinks.go new file mode 100644 index 00000000..2bf63e2d --- /dev/null +++ b/docs/storage/notifications/sinks.go @@ -0,0 +1,337 @@ +package notifications + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// NOTE(stevvooe): This file contains definitions for several utility sinks. +// Typically, the broadcaster is the only sink that should be required +// externally, but others are suitable for export if the need arises. Albeit, +// the tight integration with endpoint metrics should be removed. + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. 
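+//
+// A minimal composition (illustrative names and URLs):
+//
+// b := NewBroadcaster(
+// NewEndpoint("audit", "http://audit.local/events", EndpointConfig{}),
+// NewEndpoint("search", "http://search.local/events", EndpointConfig{}),
+// )
+// _ = b.Write(evt) // evt is an Event assembled elsewhere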
+type Broadcaster struct { + sinks []Sink + events chan []Event + closed chan chan struct{} +} + +// NewBroadcaster ... +// Add appends one or more sinks to the list of sinks. The broadcaster +// behavior will be affected by the properties of the sink. Generally, the +// sink should accept all messages and deal with reliability on its own. Use +// of EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan []Event), + closed: make(chan chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts a block of events to be dispatched to all sinks. This method +// will never fail and should never block (hopefully!). The caller cedes the +// slice memory to the broadcaster and should not modify it after calling +// write. +func (b *Broadcaster) Write(events ...Event) error { + select { + case b.events <- events: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + logrus.Infof("broadcaster: closing") + select { + case <-b.closed: + // already closed + return fmt.Errorf("broadcaster: already closed") + default: + // do a little chan handoff dance to synchronize closing + closed := make(chan struct{}) + b.closed <- closed + close(b.closed) + <-closed + return nil + } +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + for { + select { + case block := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(block...); err != nil { + logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) + } + } + case closing := <-b.closed: + + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil { + logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) + } + } + closing <- struct{}{} + + logrus.Debugf("broadcaster: closed") + return + } + } +} + +// eventQueue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type eventQueue struct { + sink Sink + events *list.List + listeners []eventQueueListener + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// eventQueueListener is called when various events happen on the queue. +type eventQueueListener interface { + ingress(events ...Event) + egress(events ...Event) +} + +// newEventQueue returns a queue to the provided sink. If the updater is non- +// nil, it will be called to update pending metrics on ingress and egress. +func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { + eq := eventQueue{ + sink: sink, + events: list.New(), + listeners: listeners, + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// beend closed. +func (eq *eventQueue) Write(events ...Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + for _, listener := range eq.listeners { + listener.ingress(events...) 
+ }
+ eq.events.PushBack(events)
+ eq.cond.Signal() // signal waiters
+
+ return nil
+}
+
+// Close shuts down the event queue, flushing any pending events to the sink
+// before closing it.
+func (eq *eventQueue) Close() error {
+ eq.mu.Lock()
+ defer eq.mu.Unlock()
+
+ if eq.closed {
+ return fmt.Errorf("eventqueue: already closed")
+ }
+
+ // set closed flag
+ eq.closed = true
+ eq.cond.Signal() // signal flushes queue
+ eq.cond.Wait() // wait for signal from last flush
+
+ return eq.sink.Close()
+}
+
+// run is the main goroutine to flush events to the target sink.
+func (eq *eventQueue) run() {
+ for {
+ block := eq.next()
+
+ if block == nil {
+ return // nil block means event queue is closed.
+ }
+
+ if err := eq.sink.Write(block...); err != nil {
+ logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err)
+ }
+
+ for _, listener := range eq.listeners {
+ listener.egress(block...)
+ }
+ }
+}
+
+// next encompasses the critical section of the run loop. When the queue is
+// empty, it will block on the condition. If new data arrives, it will wake
+// and return a block. When closed, a nil slice will be returned.
+func (eq *eventQueue) next() []Event {
+ eq.mu.Lock()
+ defer eq.mu.Unlock()
+
+ for eq.events.Len() < 1 {
+ if eq.closed {
+ eq.cond.Broadcast()
+ return nil
+ }
+
+ eq.cond.Wait()
+ }
+
+ front := eq.events.Front()
+ block := front.Value.([]Event)
+ eq.events.Remove(front)
+
+ return block
+}
+
+// retryingSink retries the write until success or an ErrSinkClosed is
+// returned. The underlying sink must have a nonzero probability of
+// succeeding or the sink will block. Internally, a circuit breaker governs
+// retries and reset. Concurrent calls to a retrying sink are serialized
+// through the sink, meaning that if one is in-flight, another will not
+// proceed.
+type retryingSink struct {
+ mu sync.Mutex
+ sink Sink
+ closed bool
+
+ // circuit breaker heuristics
+ failures struct {
+ threshold int
+ recent int
+ last time.Time
+ backoff time.Duration // time after which we retry after failure.
+ }
+}
+
+type retryingSinkListener interface {
+ active(events ...Event)
+ retry(events ...Event)
+}
+
+// TODO(stevvooe): We are using a circuit breaker here, which actually
+// doesn't make a whole lot of sense for this use case, since we always
+// retry. Move this to use bounded exponential backoff.
+
+// newRetryingSink returns a sink that will retry writes to a sink, backing
+// off on failure. Parameters threshold and backoff adjust the behavior of the
+// circuit breaker.
+func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink {
+ rs := &retryingSink{
+ sink: sink,
+ }
+ rs.failures.threshold = threshold
+ rs.failures.backoff = backoff
+
+ return rs
+}
+
+// Write attempts to flush the events to the downstream sink until it succeeds
+// or the sink is closed.
+func (rs *retryingSink) Write(events ...Event) error {
+ rs.mu.Lock()
+ defer rs.mu.Unlock()
+
+retry:
+
+ if rs.closed {
+ return ErrSinkClosed
+ }
+
+ if !rs.proceed() {
+ logrus.Warnf("%v encountered too many errors, backing off", rs.sink)
+ rs.wait(rs.failures.backoff)
+ goto retry
+ }
+
+ if err := rs.write(events...); err != nil {
+ if err == ErrSinkClosed {
+ // terminal!
+ return err
+ }
+
+ logrus.Errorf("retryingsink: error writing events: %v, retrying", err)
+ goto retry
+ }
+
+ return nil
+}
+
+// Close closes the sink and the underlying sink.
+func (rs *retryingSink) Close() error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if rs.closed { + return fmt.Errorf("retryingsink: already closed") + } + + rs.closed = true + return rs.sink.Close() +} + +// write provides a helper that dispatches failure and success properly. Used +// by write as the single-flight write call. +func (rs *retryingSink) write(events ...Event) error { + if err := rs.sink.Write(events...); err != nil { + rs.failure() + return err + } + + rs.reset() + return nil +} + +// wait backoff time against the sink, unlocking so others can proceed. Should +// only be called by methods that currently have the mutex. +func (rs *retryingSink) wait(backoff time.Duration) { + rs.mu.Unlock() + defer rs.mu.Lock() + + // backoff here + time.Sleep(backoff) +} + +// reset marks a succesful call. +func (rs *retryingSink) reset() { + rs.failures.recent = 0 + rs.failures.last = time.Time{} +} + +// failure records a failure. +func (rs *retryingSink) failure() { + rs.failures.recent++ + rs.failures.last = time.Now().UTC() +} + +// proceed returns true if the call should proceed based on circuit breaker +// hueristics. +func (rs *retryingSink) proceed() bool { + return rs.failures.recent < rs.failures.threshold || + time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) +} diff --git a/docs/storage/notifications/sinks_test.go b/docs/storage/notifications/sinks_test.go new file mode 100644 index 00000000..89756a99 --- /dev/null +++ b/docs/storage/notifications/sinks_test.go @@ -0,0 +1,223 @@ +package notifications + +import ( + "fmt" + "math/rand" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "testing" +) + +func TestBroadcaster(t *testing.T) { + const nEvents = 1000 + var sinks []Sink + + for i := 0; i < 10; i++ { + sinks = append(sinks, &testSink{}) + } + + b := NewBroadcaster(sinks...) + + var block []Event + var wg sync.WaitGroup + for i := 1; i <= nEvents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := b.Write(block...); err != nil { + t.Fatalf("error writing block of length %d: %v", len(block), err) + } + wg.Done() + }(block...) + + block = nil + } + } + + wg.Wait() // Wait until writes complete + checkClose(t, b) + + // Iterate through the sinks and check that they all have the expected length. + for _, sink := range sinks { + ts := sink.(*testSink) + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != nEvents { + t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + } + +} + +func TestEventQueue(t *testing.T) { + const nevents = 1000 + var ts testSink + metrics := newSafeMetrics() + eq := newEventQueue( + // delayed sync simulates destination slower than channel comms + &delayedSink{ + Sink: &ts, + delay: time.Millisecond * 1, + }, metrics.eventQueueListener()) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= nevents; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + if err := eq.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + wg.Done() + }(block...) 
+ + block = nil + } + } + + wg.Wait() + checkClose(t, eq) + + ts.mu.Lock() + defer ts.mu.Unlock() + metrics.Lock() + defer metrics.Unlock() + + if len(ts.events) != nevents { + t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) + } + + if !ts.closed { + t.Fatalf("sink should have been closed") + } + + if metrics.Events != nevents { + t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) + } + + if metrics.Pending != 0 { + t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) + } +} + +func TestRetryingSink(t *testing.T) { + + // Make a sync that fails most of the time, ensuring that all the events + // make it through. + var ts testSink + flaky := &flakySink{ + rate: 1.0, // start out always failing. + Sink: &ts, + } + s := newRetryingSink(flaky, 3, 10*time.Millisecond) + + var wg sync.WaitGroup + var block []Event + for i := 1; i <= 100; i++ { + block = append(block, createTestEvent("push", "library/test", "blob")) + + // Above 50, set the failure rate lower + if i > 50 { + s.mu.Lock() + flaky.rate = 0.90 + s.mu.Unlock() + } + + if i%10 == 0 && i > 0 { + wg.Add(1) + go func(block ...Event) { + defer wg.Done() + if err := s.Write(block...); err != nil { + t.Fatalf("error writing event block: %v", err) + } + }(block...) + + block = nil + } + } + + wg.Wait() + checkClose(t, s) + + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.events) != 100 { + t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) + } +} + +type testSink struct { + events []Event + mu sync.Mutex + closed bool +} + +func (ts *testSink) Write(events ...Event) error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.events = append(ts.events, events...) + return nil +} + +func (ts *testSink) Close() error { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.closed = true + + logrus.Infof("closing testSink") + return nil +} + +type delayedSink struct { + Sink + delay time.Duration +} + +func (ds *delayedSink) Write(events ...Event) error { + time.Sleep(ds.delay) + return ds.Sink.Write(events...) +} + +type flakySink struct { + Sink + rate float64 +} + +func (fs *flakySink) Write(events ...Event) error { + if rand.Float64() < fs.rate { + return fmt.Errorf("error writing %d events", len(events)) + } + + return fs.Sink.Write(events...) +} + +func checkClose(t *testing.T, sink Sink) { + if err := sink.Close(); err != nil { + t.Fatalf("unexpected error closing: %v", err) + } + + // second close should not crash but should return an error. + if err := sink.Close(); err == nil { + t.Fatalf("no error on double close") + } + + // Write after closed should be an error + if err := sink.Write([]Event{}...); err == nil { + t.Fatalf("write after closed did not have an error") + } else if err != ErrSinkClosed { + t.Fatalf("error should be ErrSinkClosed") + } +} diff --git a/docs/storage/paths.go b/docs/storage/paths.go new file mode 100644 index 00000000..9380dc65 --- /dev/null +++ b/docs/storage/paths.go @@ -0,0 +1,458 @@ +package storage + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/distribution/digest" +) + +const storagePathVersion = "v2" + +// pathMapper maps paths based on "object names" and their ids. The "object +// names" mapped by pathMapper are internal to the storage system. 
+//
+// The path layout in the storage backend is roughly as follows:
+//
+//	<root>/v2
+//		-> repositories/
+//			-> <name>/
+//				-> _manifests/
+//					revisions
+//						-> <manifest digest path>
+//							-> link
+//							-> signatures
+//								<algorithm>/<digest>/link
+//					tags/<tag>
+//						-> current/link
+//						-> index
+//							-> <algorithm>/<hex digest>/link
+//				-> _layers/
+//					<layer links to blob store>
+//				-> _uploads/<uuid>
+//					data
+//					startedat
+//		-> blob/<algorithm>
+//			<split directory content addressable storage>
+//
+// The storage backend layout is broken up into a content-addressable blob
+// store and repositories. The content-addressable blob store holds most data
+// throughout the backend, keyed by algorithm and digests of the underlying
+// content. Access to the blob store is controlled through links from the
+// repository to blobstore.
+//
+// A repository is made up of layers, manifests and tags. The layers component
+// is just a directory of layers which are "linked" into a repository. A layer
+// can only be accessed through a qualified repository name if it is linked in
+// the repository. Uploads of layers are managed in the uploads directory,
+// which is keyed by upload uuid. When all data for an upload is received, the
+// data is moved into the blob store and the upload directory is deleted.
+// Abandoned uploads can be garbage collected by reading the startedat file
+// and removing uploads that have been active for longer than a certain time.
+//
+// The third component of the repository directory is the manifests store,
+// which is made up of a revision store and tag store. Manifests are stored in
+// the blob store and linked into the revision store. Signatures are separated
+// from the manifest payload data and linked into the blob store, as well.
+// While the registry can save all revisions of a manifest, no relationship is
+// implied as to the ordering of changes to a manifest. The tag store provides
+// support for name/tag lookups of manifests, using "current/link" under a
+// named tag directory. An index is maintained to support deletions of all
+// revisions of a given manifest tag.
+//
+// We cover the path formats implemented by this path mapper below.
+//
+// Manifests:
+//
+//	manifestRevisionPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
+//	manifestRevisionLinkPathSpec:  <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
+//	manifestSignaturesPathSpec:    <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/
+//	manifestSignatureLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/signatures/<algorithm>/<hex digest>/link
+//
+// Tags:
+//
+//	manifestTagsPathSpec:          <root>/v2/repositories/<name>/_manifests/tags/
+//	manifestTagPathSpec:           <root>/v2/repositories/<name>/_manifests/tags/<tag>/
+//	manifestTagCurrentPathSpec:    <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
+//	manifestTagIndexPathSpec:      <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
+//	manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
+//
+// Layers:
+//
+//	layerLinkPathSpec:             <root>/v2/repositories/<name>/_layers/tarsum/<tarsum version>/<digest algorithm>/<hex digest>/link
+//
+// Uploads:
+//
+//	uploadDataPathSpec:            <root>/v2/repositories/<name>/_uploads/<uuid>/data
+//	uploadStartedAtPathSpec:       <root>/v2/repositories/<name>/_uploads/<uuid>/startedat
+//
+// Blob Store:
+//
+//	blobPathSpec:                  <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
+//	blobDataPathSpec:              <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
+//
+// For more information on the semantic meaning of each path and their
+// contents, please see the path spec documentation.
+type pathMapper struct {
+	root    string
+	version string // should be a constant?
+}
+
+var defaultPathMapper = &pathMapper{
+	root:    "/docker/registry/",
+	version: storagePathVersion,
+}
+
+// path returns the path identified by spec.
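+//
+// For example (mirroring the test cases in paths_test.go below), the default
+// mapper resolves a manifestRevisionPathSpec for repository "foo/bar" and
+// revision "sha256:abcdef0123456789" to:
+//
+//	/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789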
+func (pm *pathMapper) path(spec pathSpec) (string, error) {
+
+	// Switch on the path object type and return the appropriate path. At
+	// first glance, one may wonder why we don't use an interface to
+	// accomplish this. By keeping the formatting separate from the pathSpec
+	// types, the path generation remains componentized. These specs could be
+	// passed to a completely different mapper implementation and generate a
+	// different set of paths.
+	//
+	// For example, imagine migrating from one backend to the other: one could
+	// build a filesystem walker that converts a string path in one version,
+	// to an intermediate path object, that can be consumed and mapped by the
+	// other version.
+
+	rootPrefix := []string{pm.root, pm.version}
+	repoPrefix := append(rootPrefix, "repositories")
+
+	switch v := spec.(type) {
+
+	case manifestRevisionPathSpec:
+		components, err := digestPathComponents(v.revision, false)
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
+	case manifestRevisionLinkPathSpec:
+		root, err := pm.path(manifestRevisionPathSpec{
+			name:     v.name,
+			revision: v.revision,
+		})
+
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, "link"), nil
+	case manifestSignaturesPathSpec:
+		root, err := pm.path(manifestRevisionPathSpec{
+			name:     v.name,
+			revision: v.revision,
+		})
+
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, "signatures"), nil
+	case manifestSignatureLinkPathSpec:
+		root, err := pm.path(manifestSignaturesPathSpec{
+			name:     v.name,
+			revision: v.revision,
+		})
+		if err != nil {
+			return "", err
+		}
+
+		signatureComponents, err := digestPathComponents(v.signature, false)
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil
+	case manifestTagsPathSpec:
+		return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
+	case manifestTagPathSpec:
+		root, err := pm.path(manifestTagsPathSpec{
+			name: v.name,
+		})
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, v.tag), nil
+	case manifestTagCurrentPathSpec:
+		root, err := pm.path(manifestTagPathSpec{
+			name: v.name,
+			tag:  v.tag,
+		})
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, "current", "link"), nil
+	case manifestTagIndexPathSpec:
+		root, err := pm.path(manifestTagPathSpec{
+			name: v.name,
+			tag:  v.tag,
+		})
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, "index"), nil
+	case manifestTagIndexEntryPathSpec:
+		root, err := pm.path(manifestTagIndexPathSpec{
+			name: v.name,
+			tag:  v.tag,
+		})
+		if err != nil {
+			return "", err
+		}
+
+		components, err := digestPathComponents(v.revision, false)
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, path.Join(append(components, "link")...)), nil
+	case layerLinkPathSpec:
+		components, err := digestPathComponents(v.digest, false)
+		if err != nil {
+			return "", err
+		}
+
+		// For now, only map tarsum paths.
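+		// digestPathComponents (below) emits "tarsum" as the first component
+		// for tarsum digests, e.g. {"tarsum", "v1", "sha256", "<hex digest>"}.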
+		if components[0] != "tarsum" {
+			// Only tarsum is supported, for now
+			return "", fmt.Errorf("unsupported content digest: %v", v.digest)
+		}
+
+		layerLinkPathComponents := append(repoPrefix, v.name, "_layers")
+
+		return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil
+	case blobDataPathSpec:
+		components, err := digestPathComponents(v.digest, true)
+		if err != nil {
+			return "", err
+		}
+
+		components = append(components, "data")
+		blobPathPrefix := append(rootPrefix, "blobs")
+		return path.Join(append(blobPathPrefix, components...)...), nil
+
+	case uploadDataPathSpec:
+		return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "data")...), nil
+	case uploadStartedAtPathSpec:
+		return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "startedat")...), nil
+	default:
+		// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
+		return "", fmt.Errorf("unknown path spec: %#v", v)
+	}
+}
+
+// pathSpec is a type to mark structs as path specs. There is no
+// implementation because we'd like to keep the specs and the mappers
+// decoupled.
+type pathSpec interface {
+	pathSpec()
+}
+
+// manifestRevisionPathSpec describes the components of the directory path for
+// a manifest revision.
+type manifestRevisionPathSpec struct {
+	name     string
+	revision digest.Digest
+}
+
+func (manifestRevisionPathSpec) pathSpec() {}
+
+// manifestRevisionLinkPathSpec describes the path components required to look
+// up the data link for a revision of a manifest. If this file is not present,
+// the manifest blob is not available in the given repo. The contents of this
+// file should just be the digest.
+type manifestRevisionLinkPathSpec struct {
+	name     string
+	revision digest.Digest
+}
+
+func (manifestRevisionLinkPathSpec) pathSpec() {}
+
+// manifestSignaturesPathSpec describes the path components for the directory
+// containing all the signatures for the target blob. Entries are named with
+// the underlying key id.
+type manifestSignaturesPathSpec struct {
+	name     string
+	revision digest.Digest
+}
+
+func (manifestSignaturesPathSpec) pathSpec() {}
+
+// manifestSignatureLinkPathSpec describes the path components used to look up
+// a signature file by the hash of its blob.
+type manifestSignatureLinkPathSpec struct {
+	name      string
+	revision  digest.Digest
+	signature digest.Digest
+}
+
+func (manifestSignatureLinkPathSpec) pathSpec() {}
+
+// manifestTagsPathSpec describes the path elements required to point to the
+// manifest tags directory.
+type manifestTagsPathSpec struct {
+	name string
+}
+
+func (manifestTagsPathSpec) pathSpec() {}
+
+// manifestTagPathSpec describes the path elements required to point to the
+// manifest tag links files under a repository. These contain a blob id that
+// can be used to look up the data and signatures.
+type manifestTagPathSpec struct {
+	name string
+	tag  string
+}
+
+func (manifestTagPathSpec) pathSpec() {}
+
+// manifestTagCurrentPathSpec describes the link to the current revision for a
+// given tag.
+type manifestTagCurrentPathSpec struct {
+	name string
+	tag  string
+}
+
+func (manifestTagCurrentPathSpec) pathSpec() {}
+
+// manifestTagIndexPathSpec describes the link to the index of revisions
+// with the given tag.
+type manifestTagIndexPathSpec struct {
+	name string
+	tag  string
+}
+
+func (manifestTagIndexPathSpec) pathSpec() {}
+
+// manifestTagIndexEntryPathSpec describes the link to a revision of a
+// manifest with the given tag within the index.
+type manifestTagIndexEntryPathSpec struct {
+	name     string
+	tag      string
+	revision digest.Digest
+}
+
+func (manifestTagIndexEntryPathSpec) pathSpec() {}
+
+// layerLinkPathSpec specifies a path for a layer link, which is a file with a
+// blob id. The layer link will contain a content addressable blob id reference
+// into the blob store. The format of the contents is as follows:
+//
+//	<algorithm>:<hex digest>
+//
+// The following example of the file contents is more illustrative:
+//
+//	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
+//
+// This indicates that there is a blob with the id/digest, calculated via
+// sha256, that can be fetched from the blob store.
+type layerLinkPathSpec struct {
+	name   string
+	digest digest.Digest
+}
+
+func (layerLinkPathSpec) pathSpec() {}
+
+// blobAlgorithmReplacer does some very simple path sanitization for user
+// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
+// should be "safe" before getting this far due to strict digest requirements
+// but we can add further path conversion here, if needed.
+var blobAlgorithmReplacer = strings.NewReplacer(
+	"+", "/",
+	".", "/",
+	";", "/",
+)
+
+// // blobPathSpec contains the path for the registry global blob store.
+// type blobPathSpec struct {
+// 	digest digest.Digest
+// }
+
+// func (blobPathSpec) pathSpec() {}
+
+// blobDataPathSpec contains the path for the registry global blob store. For
+// now, this contains layer data, exclusively.
+type blobDataPathSpec struct {
+	digest digest.Digest
+}
+
+func (blobDataPathSpec) pathSpec() {}
+
+// uploadDataPathSpec defines the path parameters of the data file for
+// uploads.
+type uploadDataPathSpec struct {
+	name string
+	uuid string
+}
+
+func (uploadDataPathSpec) pathSpec() {}
+
+// uploadStartedAtPathSpec defines the path parameters for the file that
+// stores the start time of an upload. If it is missing, the upload is
+// considered unknown. Admittedly, the presence of this file is an ugly hack
+// to make sure we have a way to cleanup old or stalled uploads that doesn't
+// rely on driver FileInfo behavior. If we come up with a more clever way to
+// do this, we should remove this file immediately and rely on the startedAt
+// field from the client to enforce time out policies.
+type uploadStartedAtPathSpec struct {
+	name string
+	uuid string
+}
+
+func (uploadStartedAtPathSpec) pathSpec() {}
+
+// digestPathComponents provides a consistent path breakdown for a given
+// digest. For a generic digest, it will be as follows:
+//
+//	<algorithm>/<hex digest>
+//
+// Most importantly, for tarsum, the layout looks like this:
+//
+//	tarsum/<tarsum version>/<digest algorithm>/<hex digest>
+//
+// If multilevel is true, the first two bytes of the digest will separate
+// groups of digest folders. It will be as follows:
+//
+//	<algorithm>/<first two bytes of digest>/<full digest>
+//
+func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
+	if err := dgst.Validate(); err != nil {
+		return nil, err
+	}
+
+	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
+	hex := dgst.Hex()
+	prefix := []string{algorithm}
+
+	var suffix []string
+
+	if multilevel {
+		suffix = append(suffix, hex[:2])
+	}
+
+	suffix = append(suffix, hex)
+
+	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
+		// We have a tarsum!
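+		// e.g. "tarsum.v1+sha256:<hex>" yields the prefix
+		// {"tarsum", "v1", "sha256"} ahead of the hex suffix computed above.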
+ version := tsi.Version + if version == "" { + version = "v0" + } + + prefix = []string{ + "tarsum", + version, + tsi.Algorithm, + } + } + + return append(prefix, suffix...), nil +} diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go new file mode 100644 index 00000000..79410e75 --- /dev/null +++ b/docs/storage/paths_test.go @@ -0,0 +1,138 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestPathMapper(t *testing.T) { + pm := &pathMapper{ + root: "/pathmapper-test", + } + + for _, testcase := range []struct { + spec pathSpec + expected string + err error + }{ + { + spec: manifestRevisionPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", + }, + { + spec: manifestRevisionLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", + }, + { + spec: manifestSignatureLinkPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + signature: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", + }, + { + spec: manifestSignaturesPathSpec{ + name: "foo/bar", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", + }, + { + spec: manifestTagsPathSpec{ + name: "foo/bar", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags", + }, + { + spec: manifestTagPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag", + }, + { + spec: manifestTagCurrentPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/current/link", + }, + { + spec: manifestTagIndexPathSpec{ + name: "foo/bar", + tag: "thetag", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index", + }, + { + spec: manifestTagIndexEntryPathSpec{ + name: "foo/bar", + tag: "thetag", + revision: "sha256:abcdef0123456789", + }, + expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", + }, + { + spec: layerLinkPathSpec{ + name: "foo/bar", + digest: "tarsum.v1+test:abcdef", + }, + expected: "/pathmapper-test/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", + }, + { + spec: blobDataPathSpec{ + digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), + }, + expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", + }, + { + spec: blobDataPathSpec{ + digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), + }, + expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", + }, + + { + spec: uploadDataPathSpec{ + name: "foo/bar", + uuid: "asdf-asdf-asdf-adsf", + }, + expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + }, + { + spec: uploadStartedAtPathSpec{ + name: "foo/bar", + uuid: "asdf-asdf-asdf-adsf", + }, + expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + }, + } { + p, err := pm.path(testcase.spec) + if err != nil { + t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) + } + + if p != testcase.expected { + 
t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) + } + } + + // Add a few test cases to ensure we cover some errors + + // Specify a path that requires a revision and get a digest validation error. + badpath, err := pm.path(manifestSignaturesPathSpec{ + name: "foo/bar", + }) + if err == nil { + t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) + } + +} diff --git a/docs/storage/registry.go b/docs/storage/registry.go new file mode 100644 index 00000000..ed865007 --- /dev/null +++ b/docs/storage/registry.go @@ -0,0 +1,80 @@ +package storage + +import ( + "github.com/docker/distribution/storagedriver" + "golang.org/x/net/context" +) + +// registry is the top-level implementation of Registry for use in the storage +// package. All instances should descend from this object. +type registry struct { + driver storagedriver.StorageDriver + pm *pathMapper + blobStore *blobStore +} + +// NewRegistryWithDriver creates a new registry instance from the provided +// driver. The resulting registry may be shared by multiple goroutines but is +// cheap to allocate. +func NewRegistryWithDriver(driver storagedriver.StorageDriver) Registry { + bs := &blobStore{} + + reg := ®istry{ + driver: driver, + blobStore: bs, + + // TODO(sday): This should be configurable. + pm: defaultPathMapper, + } + + reg.blobStore.registry = reg + + return reg +} + +// Repository returns an instance of the repository tied to the registry. +// Instances should not be shared between goroutines but are cheap to +// allocate. In general, they should be request scoped. +func (reg *registry) Repository(ctx context.Context, name string) Repository { + return &repository{ + ctx: ctx, + registry: reg, + name: name, + } +} + +// repository provides name-scoped access to various services. +type repository struct { + *registry + ctx context.Context + name string +} + +// Name returns the name of the repository. +func (repo *repository) Name() string { + return repo.name +} + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Manifests() ManifestService { + return &manifestStore{ + repository: repo, + revisionStore: &revisionStore{ + repository: repo, + }, + tagStore: &tagStore{ + repository: repo, + }, + } +} + +// Layers returns an instance of the LayerService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (repo *repository) Layers() LayerService { + return &layerStore{ + repository: repo, + } +} diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go new file mode 100644 index 00000000..b3ecd711 --- /dev/null +++ b/docs/storage/revisionstore.go @@ -0,0 +1,207 @@ +package storage + +import ( + "encoding/json" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +// revisionStore supports storing and managing manifest revisions. +type revisionStore struct { + *repository +} + +// exists returns true if the revision is available in the named repository. 
+func (rs *revisionStore) exists(revision digest.Digest) (bool, error) { + revpath, err := rs.pm.path(manifestRevisionPathSpec{ + name: rs.Name(), + revision: revision, + }) + + if err != nil { + return false, err + } + + exists, err := exists(rs.driver, revpath) + if err != nil { + return false, err + } + + return exists, nil +} + +// get retrieves the manifest, keyed by revision digest. +func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, error) { + // Ensure that this revision is available in this repository. + if exists, err := rs.exists(revision); err != nil { + return nil, err + } else if !exists { + return nil, ErrUnknownManifestRevision{ + Name: rs.Name(), + Revision: revision, + } + } + + content, err := rs.blobStore.get(revision) + if err != nil { + return nil, err + } + + // Fetch the signatures for the manifest + signatures, err := rs.getSignatures(revision) + if err != nil { + return nil, err + } + + jsig, err := libtrust.NewJSONSignature(content, signatures...) + if err != nil { + return nil, err + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm manifest.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + + return &sm, nil +} + +// put stores the manifest in the repository, if not already present. Any +// updated signatures will be stored, as well. +func (rs *revisionStore) put(sm *manifest.SignedManifest) (digest.Digest, error) { + // Resolve the payload in the manifest. + payload, err := sm.Payload() + if err != nil { + return "", err + } + + // Digest and store the manifest payload in the blob store. + revision, err := rs.blobStore.put(payload) + if err != nil { + logrus.Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := rs.link(revision); err != nil { + return "", err + } + + // Grab each json signature and store them. + signatures, err := sm.Signatures() + if err != nil { + return "", err + } + + for _, signature := range signatures { + if err := rs.putSignature(revision, signature); err != nil { + return "", err + } + } + + return revision, nil +} + +// link links the revision into the repository. +func (rs *revisionStore) link(revision digest.Digest) error { + revisionPath, err := rs.pm.path(manifestRevisionLinkPathSpec{ + name: rs.Name(), + revision: revision, + }) + + if err != nil { + return err + } + + if exists, err := exists(rs.driver, revisionPath); err != nil { + return err + } else if exists { + // Revision has already been linked! + return nil + } + + return rs.blobStore.link(revisionPath, revision) +} + +// delete removes the specified manifest revision from storage. +func (rs *revisionStore) delete(revision digest.Digest) error { + revisionPath, err := rs.pm.path(manifestRevisionPathSpec{ + name: rs.Name(), + revision: revision, + }) + + if err != nil { + return err + } + + return rs.driver.Delete(revisionPath) +} + +// getSignatures retrieves all of the signature blobs for the specified +// manifest revision. +func (rs *revisionStore) getSignatures(revision digest.Digest) ([][]byte, error) { + signaturesPath, err := rs.pm.path(manifestSignaturesPathSpec{ + name: rs.Name(), + revision: revision, + }) + + if err != nil { + return nil, err + } + + // Need to append signature digest algorithm to path to get all items. + // Perhaps, this should be in the pathMapper but it feels awkward. 
This + // can be eliminated by implementing listAll on drivers. + signaturesPath = path.Join(signaturesPath, "sha256") + + signaturePaths, err := rs.driver.List(signaturesPath) + if err != nil { + return nil, err + } + + var signatures [][]byte + for _, sigPath := range signaturePaths { + // Append the link portion + sigPath = path.Join(sigPath, "link") + + // TODO(stevvooe): These fetches should be parallelized for performance. + p, err := rs.blobStore.linked(sigPath) + if err != nil { + return nil, err + } + + signatures = append(signatures, p) + } + + return signatures, nil +} + +// putSignature stores the signature for the provided manifest revision. +func (rs *revisionStore) putSignature(revision digest.Digest, signature []byte) error { + signatureDigest, err := rs.blobStore.put(signature) + if err != nil { + return err + } + + signaturePath, err := rs.pm.path(manifestSignatureLinkPathSpec{ + name: rs.Name(), + revision: revision, + signature: signatureDigest, + }) + + if err != nil { + return err + } + + return rs.blobStore.link(signaturePath, signatureDigest) +} diff --git a/docs/storage/services.go b/docs/storage/services.go new file mode 100644 index 00000000..7e6ac476 --- /dev/null +++ b/docs/storage/services.go @@ -0,0 +1,84 @@ +package storage + +import ( + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "golang.org/x/net/context" +) + +// TODO(stevvooe): These types need to be moved out of the storage package. + +// Registry represents a collection of repositories, addressable by name. +type Registry interface { + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name string) Repository +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Name returns the name of the repository. + Name() string + + // Manifests returns a reference to this repository's manifest service. + Manifests() ManifestService + + // Layers returns a reference to this repository's layers service. + Layers() LayerService +} + +// ManifestService provides operations on image manifests. +type ManifestService interface { + // Tags lists the tags under the named repository. + Tags() ([]string, error) + + // Exists returns true if the manifest exists. + Exists(tag string) (bool, error) + + // Get retrieves the named manifest, if it exists. + Get(tag string) (*manifest.SignedManifest, error) + + // Put creates or updates the named manifest. + // Put(tag string, manifest *manifest.SignedManifest) (digest.Digest, error) + Put(tag string, manifest *manifest.SignedManifest) error + + // Delete removes the named manifest, if it exists. + Delete(tag string) error + + // TODO(stevvooe): There are several changes that need to be done to this + // interface: + // + // 1. Get(tag string) should be GetByTag(tag string) + // 2. Put(tag string, manifest *manifest.SignedManifest) should be + // Put(manifest *manifest.SignedManifest). The method can read the + // tag on manifest to automatically tag it in the repository. + // 3. Need a GetByDigest(dgst digest.Digest) method. + // 4. Allow explicit tagging with Tag(digest digest.Digest, tag string) + // 5. Support reading tags with a re-entrant reader to avoid large + // allocations in the registry. + // 6. Long-term: Provide All() method that lets one scroll through all of + // the manifest entries. + // 7. 
Long-term: break out concept of signing from manifests. This is
+	//    really a part of the distribution sprint.
+	// 8. Long-term: Manifest should be an interface. This code shouldn't
+	//    really be concerned with the storage format.
+}
+
+// LayerService provides operations on layer files in a backend storage.
+type LayerService interface {
+	// Exists returns true if the layer exists.
+	Exists(digest digest.Digest) (bool, error)
+
+	// Fetch the layer identified by TarSum.
+	Fetch(digest digest.Digest) (Layer, error)
+
+	// Upload begins a layer upload to repository identified by name,
+	// returning a handle.
+	Upload() (LayerUpload, error)
+
+	// Resume continues an in-progress layer upload, returning a handle to the
+	// upload. The caller should seek to the latest desired upload location
+	// before proceeding.
+	Resume(uuid string) (LayerUpload, error)
+}
diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go
new file mode 100644
index 00000000..f7b87a25
--- /dev/null
+++ b/docs/storage/tagstore.go
@@ -0,0 +1,157 @@
+package storage
+
+import (
+	"path"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/storagedriver"
+)
+
+// tagStore provides methods to manage manifest tags in a backend storage driver.
+type tagStore struct {
+	*repository
+}
+
+// tags lists the manifest tags for the specified repository.
+func (ts *tagStore) tags() ([]string, error) {
+	p, err := ts.pm.path(manifestTagPathSpec{
+		name: ts.name,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var tags []string
+	entries, err := ts.driver.List(p)
+	if err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			return nil, ErrUnknownRepository{Name: ts.name}
+		default:
+			return nil, err
+		}
+	}
+
+	for _, entry := range entries {
+		_, filename := path.Split(entry)
+
+		tags = append(tags, filename)
+	}
+
+	return tags, nil
+}
+
+// exists returns true if the specified manifest tag exists in the repository.
+func (ts *tagStore) exists(tag string) (bool, error) {
+	tagPath, err := ts.pm.path(manifestTagCurrentPathSpec{
+		name: ts.Name(),
+		tag:  tag,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	exists, err := exists(ts.driver, tagPath)
+	if err != nil {
+		return false, err
+	}
+
+	return exists, nil
+}
+
+// tag tags the digest with the given tag, updating the store so that the
+// tag's current link points at the revision. The digest must point to a
+// manifest.
+func (ts *tagStore) tag(tag string, revision digest.Digest) error {
+	indexEntryPath, err := ts.pm.path(manifestTagIndexEntryPathSpec{
+		name:     ts.Name(),
+		tag:      tag,
+		revision: revision,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{
+		name: ts.Name(),
+		tag:  tag,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Link into the index
+	if err := ts.blobStore.link(indexEntryPath, revision); err != nil {
+		return err
+	}
+
+	// Overwrite the current link
+	return ts.blobStore.link(currentPath, revision)
+}
+
+// resolve the current revision for name and tag.
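+// For illustration: after tag("latest", revision), resolve("latest") reads
+// the revision digest back from .../_manifests/tags/latest/current/link.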
+func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
+	currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{
+		name: ts.Name(),
+		tag:  tag,
+	})
+
+	if err != nil {
+		return "", err
+	}
+
+	if exists, err := exists(ts.driver, currentPath); err != nil {
+		return "", err
+	} else if !exists {
+		return "", ErrUnknownManifest{Name: ts.Name(), Tag: tag}
+	}
+
+	revision, err := ts.blobStore.readlink(currentPath)
+	if err != nil {
+		return "", err
+	}
+
+	return revision, nil
+}
+
+// revisions returns all revisions with the specified name and tag.
+func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) {
+	manifestTagIndexPath, err := ts.pm.path(manifestTagIndexPathSpec{
+		name: ts.Name(),
+		tag:  tag,
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO(stevvooe): Need to append digest alg to get listing of revisions.
+	manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256")
+
+	entries, err := ts.driver.List(manifestTagIndexPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var revisions []digest.Digest
+	for _, entry := range entries {
+		revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry)))
+	}
+
+	return revisions, nil
+}
+
+// delete removes the tag from the repository, including the history of all
+// revisions that have the specified tag.
+func (ts *tagStore) delete(tag string) error {
+	tagPath, err := ts.pm.path(manifestTagPathSpec{
+		name: ts.Name(),
+		tag:  tag,
+	})
+	if err != nil {
+		return err
+	}
+
+	return ts.driver.Delete(tagPath)
+}

From 6e4f9a2e3ed354911f93e92630188c3e97d61f4e Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 10 Feb 2015 18:14:23 -0800
Subject: [PATCH 023/501] Move storagedriver package to registry/storage/driver

This change is slightly more complex than previous package moves in that the
package name changed. To address this, we simply always reference the driver
package as storagedriver to avoid compatibility issues with existing code.
While unfortunate, this can be cleaned up over time.
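The hunks below apply the convention uniformly; for example, call sites
import the relocated package under its old name:

    import (
        storagedriver "github.com/docker/distribution/registry/storage/driver"
    )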
Signed-off-by: Stephen J Day --- docs/handlers/api_test.go | 4 +- docs/handlers/app.go | 8 +- docs/handlers/app_test.go | 4 +- docs/storage/blobstore.go | 2 +- docs/storage/cloudfrontlayerhandler.go | 2 +- docs/storage/delegatelayerhandler.go | 2 +- docs/storage/driver/README.md | 49 + docs/storage/driver/azure/azure.go | 347 +++++ docs/storage/driver/azure/azure_test.go | 65 + docs/storage/driver/azure/blockblob.go | 24 + docs/storage/driver/azure/blockblob_test.go | 155 +++ docs/storage/driver/azure/blockid.go | 60 + docs/storage/driver/azure/blockid_test.go | 74 ++ docs/storage/driver/azure/randomwriter.go | 208 +++ .../storage/driver/azure/randomwriter_test.go | 339 +++++ docs/storage/driver/azure/zerofillwriter.go | 49 + .../driver/azure/zerofillwriter_test.go | 126 ++ docs/storage/driver/base/base.go | 141 ++ docs/storage/driver/factory/factory.go | 71 + docs/storage/driver/fileinfo.go | 79 ++ docs/storage/driver/filesystem/README.md | 8 + docs/storage/driver/filesystem/driver.go | 286 ++++ docs/storage/driver/filesystem/driver_test.go | 29 + docs/storage/driver/inmemory/README.md | 10 + docs/storage/driver/inmemory/driver.go | 257 ++++ docs/storage/driver/inmemory/driver_test.go | 24 + docs/storage/driver/inmemory/mfs.go | 333 +++++ docs/storage/driver/ipc/client.go | 454 +++++++ docs/storage/driver/ipc/ipc.go | 148 +++ docs/storage/driver/ipc/server.go | 178 +++ docs/storage/driver/s3/README.md | 26 + docs/storage/driver/s3/s3.go | 712 ++++++++++ docs/storage/driver/s3/s3_test.go | 97 ++ docs/storage/driver/storagedriver.go | 118 ++ docs/storage/driver/testsuites/testsuites.go | 1183 +++++++++++++++++ docs/storage/filereader.go | 2 +- docs/storage/filereader_test.go | 2 +- docs/storage/filewriter.go | 2 +- docs/storage/filewriter_test.go | 2 +- docs/storage/layer_test.go | 4 +- docs/storage/layerhandler.go | 2 +- docs/storage/layerstore.go | 2 +- docs/storage/layerupload.go | 2 +- docs/storage/manifeststore_test.go | 2 +- docs/storage/notifications/listener_test.go | 2 +- docs/storage/registry.go | 2 +- docs/storage/tagstore.go | 2 +- 47 files changed, 5674 insertions(+), 24 deletions(-) create mode 100644 docs/storage/driver/README.md create mode 100644 docs/storage/driver/azure/azure.go create mode 100644 docs/storage/driver/azure/azure_test.go create mode 100644 docs/storage/driver/azure/blockblob.go create mode 100644 docs/storage/driver/azure/blockblob_test.go create mode 100644 docs/storage/driver/azure/blockid.go create mode 100644 docs/storage/driver/azure/blockid_test.go create mode 100644 docs/storage/driver/azure/randomwriter.go create mode 100644 docs/storage/driver/azure/randomwriter_test.go create mode 100644 docs/storage/driver/azure/zerofillwriter.go create mode 100644 docs/storage/driver/azure/zerofillwriter_test.go create mode 100644 docs/storage/driver/base/base.go create mode 100644 docs/storage/driver/factory/factory.go create mode 100644 docs/storage/driver/fileinfo.go create mode 100644 docs/storage/driver/filesystem/README.md create mode 100644 docs/storage/driver/filesystem/driver.go create mode 100644 docs/storage/driver/filesystem/driver_test.go create mode 100644 docs/storage/driver/inmemory/README.md create mode 100644 docs/storage/driver/inmemory/driver.go create mode 100644 docs/storage/driver/inmemory/driver_test.go create mode 100644 docs/storage/driver/inmemory/mfs.go create mode 100644 docs/storage/driver/ipc/client.go create mode 100644 docs/storage/driver/ipc/ipc.go create mode 100644 docs/storage/driver/ipc/server.go create mode 100644 
docs/storage/driver/s3/README.md create mode 100644 docs/storage/driver/s3/s3.go create mode 100644 docs/storage/driver/s3/s3_test.go create mode 100644 docs/storage/driver/storagedriver.go create mode 100644 docs/storage/driver/testsuites/testsuites.go diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 08d0cf97..a14e93dc 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -14,11 +14,11 @@ import ( "reflect" "testing" - "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/configuration" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - _ "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/registry/api/v2" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "github.com/gorilla/handlers" diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 09c0c621..7a36309b 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -7,14 +7,14 @@ import ( "os" "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/registry/storage" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/registry/storage/notifications" - "github.com/docker/distribution/storagedriver" - "github.com/docker/distribution/storagedriver/factory" "github.com/gorilla/mux" "golang.org/x/net/context" ) diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index b27c788a..158f5fc1 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -7,11 +7,11 @@ import ( "net/url" "testing" + "github.com/docker/distribution/configuration" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/auth/silly" - "github.com/docker/distribution/configuration" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/registry/storage/driver/inmemory" "golang.org/x/net/context" ) diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index ac123f44..975df19f 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -5,7 +5,7 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" "golang.org/x/net/context" ) diff --git a/docs/storage/cloudfrontlayerhandler.go b/docs/storage/cloudfrontlayerhandler.go index fa420cc7..f887895c 100644 --- a/docs/storage/cloudfrontlayerhandler.go +++ b/docs/storage/cloudfrontlayerhandler.go @@ -10,7 +10,7 @@ import ( "time" "github.com/AdRoll/goamz/cloudfront" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" ) // cloudFrontLayerHandler provides an simple implementation of layerHandler that diff --git a/docs/storage/delegatelayerhandler.go b/docs/storage/delegatelayerhandler.go index 7ed6d87b..01354023 100644 --- a/docs/storage/delegatelayerhandler.go +++ 
b/docs/storage/delegatelayerhandler.go
@@ -5,7 +5,7 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/docker/distribution/storagedriver"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
 )
 
 // delegateLayerHandler provides a simple implementation of layerHandler that
diff --git a/docs/storage/driver/README.md b/docs/storage/driver/README.md
new file mode 100644
index 00000000..b603503e
--- /dev/null
+++ b/docs/storage/driver/README.md
@@ -0,0 +1,49 @@
+Docker-Registry Storage Driver
+==============================
+
+This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers.
+
+Provided Drivers
+================
+
+This storage driver package comes bundled with three default drivers.
+
+1. filesystem: A local storage driver configured to use a directory tree in the local filesystem.
+2. s3: A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
+3. inmemory: A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
+
+Storage Driver API
+==================
+
+The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
+
+Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
+
+Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process.
+
+Driver Selection and Configuration
+==================================
+
+The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
+
+Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\<driver name\>" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
+
+Driver Contribution
+===================
+
+## Writing new storage drivers
+To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable.
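+
+A minimal sketch of the expected shape (the driver name `mydriver` and its `New` constructor are hypothetical; the azure driver added in this patch is a complete, real example):
+
+    package mydriver
+
+    import (
+        storagedriver "github.com/docker/distribution/registry/storage/driver"
+        "github.com/docker/distribution/registry/storage/driver/factory"
+    )
+
+    type myDriverFactory struct{}
+
+    func (f *myDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+        return New(parameters) // hypothetical constructor for the concrete driver
+    }
+
+    func init() {
+        factory.Register("mydriver", &myDriverFactory{})
+    }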
+
+### In-process drivers
+Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.Create` to construct instances of this driver without requiring modification of imports throughout the codebase.
+
+### Out-of-process drivers
+As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
+
+Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time.
+
+## Testing
+Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively.
+
+## Drivers written in other languages
+Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicking StorageDriverServer in `storagedriver/ipc/server.go`. As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/jchan](https://github.com/ndeloof/jchan) and Javascript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome.
diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go
new file mode 100644
index 00000000..6ccbff40
--- /dev/null
+++ b/docs/storage/driver/azure/azure.go
@@ -0,0 +1,347 @@
+// Package azure provides a storagedriver.StorageDriver implementation to
+// store blobs in Microsoft Azure Blob Storage Service.
+package azure
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+
+	azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage"
+)
+
+const driverName = "azure"
+
+const (
+	paramAccountName = "accountname"
+	paramAccountKey  = "accountkey"
+	paramContainer   = "container"
+)
+
+type driver struct {
+	client    azure.BlobStorageClient
+	container string
+}
+
+type baseEmbed struct{ base.Base }
+
+// Driver is a storagedriver.StorageDriver implementation backed by
+// Microsoft Azure Blob Storage Service.
+type Driver struct{ baseEmbed }
+
+func init() {
+	factory.Register(driverName, &azureDriverFactory{})
+}
+
+type azureDriverFactory struct{}
+
+func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+// FromParameters constructs a new Driver with a given parameters map.
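+// For example (illustrative values only):
+//
+//	d, err := FromParameters(map[string]interface{}{
+//		"accountname": "myaccount",
+//		"accountkey":  "bXlrZXk=",
+//		"container":   "registry",
+//	})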
+func FromParameters(parameters map[string]interface{}) (*Driver, error) { + accountName, ok := parameters[paramAccountName] + if !ok || fmt.Sprint(accountName) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + } + + accountKey, ok := parameters[paramAccountKey] + if !ok || fmt.Sprint(accountKey) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + } + + container, ok := parameters[paramContainer] + if !ok || fmt.Sprint(container) == "" { + return nil, fmt.Errorf("No %s parameter provided", paramContainer) + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container)) +} + +// New constructs a new Driver with the given Azure Storage Account credentials +func New(accountName, accountKey, container string) (*Driver, error) { + api, err := azure.NewBasicClient(accountName, accountKey) + if err != nil { + return nil, err + } + + blobClient := api.GetBlobService() + + // Create registry container + if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { + return nil, err + } + + d := &driver{ + client: *blobClient, + container: container} + return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil +} + +// Implement the storagedriver.StorageDriver interface. + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(path string) ([]byte, error) { + blob, err := d.client.GetBlob(d.container, path) + if err != nil { + if is404(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + return ioutil.ReadAll(blob) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(path string, contents []byte) error { + return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if !ok { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + info, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + + size := int64(info.ContentLength) + if offset >= size { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + bytesRange := fmt.Sprintf("%v-", offset) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange) + if err != nil { + return nil, err + } + return resp, nil +} + +// WriteStream stores the contents of the provided io.ReadCloser at a location +// designated by the given path. +func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64, error) { + if blobExists, err := d.client.BlobExists(d.container, path); err != nil { + return 0, err + } else if !blobExists { + err := d.client.CreateBlockBlob(d.container, path) + if err != nil { + return 0, err + } + } + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + bs := newAzureBlockStorage(d.client) + bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) + zw := newZeroFillWriter(&bw) + return zw.Write(d.container, path, offset, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
+func (d *driver) Stat(path string) (storagedriver.FileInfo, error) {
+	// Check if the path is a blob
+	if ok, err := d.client.BlobExists(d.container, path); err != nil {
+		return nil, err
+	} else if ok {
+		blob, err := d.client.GetBlobProperties(d.container, path)
+		if err != nil {
+			return nil, err
+		}
+
+		mtim, err := time.Parse(http.TimeFormat, blob.LastModified)
+		if err != nil {
+			return nil, err
+		}
+
+		return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
+			Path:    path,
+			Size:    int64(blob.ContentLength),
+			ModTime: mtim,
+			IsDir:   false,
+		}}, nil
+	}
+
+	// Check if path is a virtual container
+	virtContainerPath := path
+	if !strings.HasSuffix(virtContainerPath, "/") {
+		virtContainerPath += "/"
+	}
+	blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
+		Prefix:     virtContainerPath,
+		MaxResults: 1,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(blobs.Blobs) > 0 {
+		// path is a virtual container
+		return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
+			Path:  path,
+			IsDir: true,
+		}}, nil
+	}
+
+	// path is not a blob or virtual container
+	return nil, storagedriver.PathNotFoundError{Path: path}
+}
+
+// List returns a list of the objects that are direct descendants of the given
+// path.
+func (d *driver) List(path string) ([]string, error) {
+	if path == "/" {
+		path = ""
+	}
+
+	blobs, err := d.listBlobs(d.container, path)
+	if err != nil {
+		return blobs, err
+	}
+
+	list := directDescendants(blobs, path)
+	return list, nil
+}
+
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *driver) Move(sourcePath string, destPath string) error {
+	sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath)
+	err := d.client.CopyBlob(d.container, destPath, sourceBlobURL)
+	if err != nil {
+		if is404(err) {
+			return storagedriver.PathNotFoundError{Path: sourcePath}
+		}
+		return err
+	}
+
+	return d.client.DeleteBlob(d.container, sourcePath)
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(path string) error {
+	ok, err := d.client.DeleteBlobIfExists(d.container, path)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil // was a blob and deleted, return
+	}
+
+	// Not a blob, see if path is a virtual container with blobs
+	blobs, err := d.listBlobs(d.container, path)
+	if err != nil {
+		return err
+	}
+
+	for _, b := range blobs {
+		if err = d.client.DeleteBlob(d.container, b); err != nil {
+			return err
+		}
+	}
+
+	if len(blobs) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+	return nil
+}
+
+// URLFor returns a publicly accessible URL for the blob stored at the given
+// path for the specified duration by making use of Azure Storage Shared
+// Access Signatures (SAS).
+// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info.
+func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) {
+	expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration
+	expires, ok := options["expiry"]
+	if ok {
+		t, ok := expires.(time.Time)
+		if ok {
+			expiresTime = t
+		}
+	}
+	return d.client.GetBlobSASURI(d.container, path, expiresTime, "r")
+}
+
+// directDescendants finds the direct descendants (blobs or virtual
+// containers) from a list of blob paths and returns their full paths.
+// Elements in the blobs list must be prefixed with a "/".
+//
+// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is
+// {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"}
+func directDescendants(blobs []string, prefix string) []string {
+	if !strings.HasPrefix(prefix, "/") { // add trailing '/'
+		prefix = "/" + prefix
+	}
+	if !strings.HasSuffix(prefix, "/") { // containerify the path
+		prefix += "/"
+	}
+
+	out := make(map[string]bool)
+	for _, b := range blobs {
+		if strings.HasPrefix(b, prefix) {
+			rel := b[len(prefix):]
+			c := strings.Count(rel, "/")
+			if c == 0 {
+				out[b] = true
+			} else {
+				out[prefix+rel[:strings.Index(rel, "/")]] = true
+			}
+		}
+	}
+
+	var keys []string
+	for k := range out {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
+	if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
+		virtPath += "/"
+	}
+
+	out := []string{}
+	marker := ""
+	for {
+		resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
+			Marker: marker,
+			Prefix: virtPath,
+		})
+
+		if err != nil {
+			return out, err
+		}
+
+		for _, b := range resp.Blobs {
+			out = append(out, b.Name)
+		}
+
+		if len(resp.Blobs) == 0 || resp.NextMarker == "" {
+			break
+		}
+		marker = resp.NextMarker
+	}
+	return out, nil
+}
+
+func is404(err error) bool {
+	e, ok := err.(azure.StorageServiceError)
+	return ok && e.StatusCode == 404
+}
diff --git a/docs/storage/driver/azure/azure_test.go b/docs/storage/driver/azure/azure_test.go
new file mode 100644
index 00000000..a8fdf3e9
--- /dev/null
+++ b/docs/storage/driver/azure/azure_test.go
@@ -0,0 +1,65 @@
+package azure
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/testsuites"
+	. "gopkg.in/check.v1"
+)
+
+const (
+	envAccountName = "AZURE_STORAGE_ACCOUNT_NAME"
+	envAccountKey  = "AZURE_STORAGE_ACCOUNT_KEY"
+	envContainer   = "AZURE_STORAGE_CONTAINER"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { TestingT(t) } + +func init() { + var ( + accountName string + accountKey string + container string + ) + + config := []struct { + env string + value *string + }{ + {envAccountName, &accountName}, + {envAccountKey, &accountKey}, + {envContainer, &container}, + } + + missing := []string{} + for _, v := range config { + *v.value = os.Getenv(v.env) + if *v.value == "" { + missing = append(missing, v.env) + } + } + + azureDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(accountName, accountKey, container) + } + + // Skip Azure storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if len(missing) > 0 { + return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) + } + return "" + } + + testsuites.RegisterInProcessSuite(azureDriverConstructor, skipCheck) + // testsuites.RegisterIPCSuite(driverName, map[string]string{ + // paramAccountName: accountName, + // paramAccountKey: accountKey, + // paramContainer: container, + // }, skipCheck) +} diff --git a/docs/storage/driver/azure/blockblob.go b/docs/storage/driver/azure/blockblob.go new file mode 100644 index 00000000..d868453f --- /dev/null +++ b/docs/storage/driver/azure/blockblob.go @@ -0,0 +1,24 @@ +package azure + +import ( + "fmt" + "io" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +// azureBlockStorage is adaptor between azure.BlobStorageClient and +// blockStorage interface. +type azureBlockStorage struct { + azure.BlobStorageClient +} + +func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { + return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) +} + +func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { + a := azureBlockStorage{} + a.BlobStorageClient = b + return a +} diff --git a/docs/storage/driver/azure/blockblob_test.go b/docs/storage/driver/azure/blockblob_test.go new file mode 100644 index 00000000..f1e39027 --- /dev/null +++ b/docs/storage/driver/azure/blockblob_test.go @@ -0,0 +1,155 @@ +package azure + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +type StorageSimulator struct { + blobs map[string]*BlockBlob +} + +type BlockBlob struct { + blocks map[string]*DataBlock + blockList []string +} + +type DataBlock struct { + data []byte + committed bool +} + +func (s *StorageSimulator) path(container, blob string) string { + return fmt.Sprintf("%s/%s", container, blob) +} + +func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) { + _, ok := s.blobs[s.path(container, blob)] + return ok, nil +} + +func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) { + bb, ok := s.blobs[s.path(container, blob)] + if !ok { + return nil, fmt.Errorf("blob not found") + } + + var readers []io.Reader + for _, bID := range bb.blockList { + readers = append(readers, bytes.NewReader(bb.blocks[bID].data)) + } + return ioutil.NopCloser(io.MultiReader(readers...)), nil +} + +func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { + r, err := s.GetBlob(container, blob) + if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil +} + +func (s 
*StorageSimulator) CreateBlockBlob(container, blob string) error { + path := s.path(container, blob) + bb := &BlockBlob{ + blocks: make(map[string]*DataBlock), + blockList: []string{}, + } + s.blobs[path] = bb + return nil +} + +func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error { + path := s.path(container, blob) + bb, ok := s.blobs[path] + if !ok { + return fmt.Errorf("blob not found") + } + data := make([]byte, len(chunk)) + copy(data, chunk) + bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob + return nil +} + +func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) { + resp := azure.BlockListResponse{} + bb, ok := s.blobs[s.path(container, blob)] + if !ok { + return resp, fmt.Errorf("blob not found") + } + + // Iterate committed blocks (in order) + if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { + for _, blockID := range bb.blockList { + b := bb.blocks[blockID] + block := azure.BlockResponse{ + Name: blockID, + Size: int64(len(b.data)), + } + resp.CommittedBlocks = append(resp.CommittedBlocks, block) + } + + } + + // Iterate uncommitted blocks (in no order) + if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { + for blockID, b := range bb.blocks { + block := azure.BlockResponse{ + Name: blockID, + Size: int64(len(b.data)), + } + if !b.committed { + resp.UncommittedBlocks = append(resp.UncommittedBlocks, block) + } + } + } + return resp, nil +} + +func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error { + bb, ok := s.blobs[s.path(container, blob)] + if !ok { + return fmt.Errorf("blob not found") + } + + var blockIDs []string + for _, v := range blocks { + bl, ok := bb.blocks[v.Id] + if !ok { // check if block ID exists + return fmt.Errorf("Block id '%s' not found", v.Id) + } + bl.committed = true + blockIDs = append(blockIDs, v.Id) + } + + // Mark all other blocks uncommitted + for k, b := range bb.blocks { + inList := false + for _, v := range blockIDs { + if k == v { + inList = true + break + } + } + if !inList { + b.committed = false + } + } + + bb.blockList = blockIDs + return nil +} + +func NewStorageSimulator() StorageSimulator { + return StorageSimulator{ + blobs: make(map[string]*BlockBlob), + } +} diff --git a/docs/storage/driver/azure/blockid.go b/docs/storage/driver/azure/blockid.go new file mode 100644 index 00000000..61f41ebc --- /dev/null +++ b/docs/storage/driver/azure/blockid.go @@ -0,0 +1,60 @@ +package azure + +import ( + "encoding/base64" + "fmt" + "math/rand" + "sync" + "time" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +type blockIDGenerator struct { + pool map[string]bool + r *rand.Rand + m sync.Mutex +} + +// Generate returns an unused random block id and adds the generated ID +// to list of used IDs so that the same block name is not used again. +func (b *blockIDGenerator) Generate() string { + b.m.Lock() + defer b.m.Unlock() + + var id string + for { + id = toBlockID(int(b.r.Int())) + if !b.exists(id) { + break + } + } + b.pool[id] = true + return id +} + +func (b *blockIDGenerator) exists(id string) bool { + _, used := b.pool[id] + return used +} + +func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { + b.m.Lock() + defer b.m.Unlock() + + for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) 
{ + b.pool[bl.Name] = true + } +} + +func newBlockIDGenerator() *blockIDGenerator { + return &blockIDGenerator{ + pool: make(map[string]bool), + r: rand.New(rand.NewSource(time.Now().UnixNano()))} +} + +// toBlockId converts given integer to base64-encoded block ID of a fixed length. +func toBlockID(i int) string { + s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs + return base64.StdEncoding.EncodeToString([]byte(s)) +} diff --git a/docs/storage/driver/azure/blockid_test.go b/docs/storage/driver/azure/blockid_test.go new file mode 100644 index 00000000..46d52a34 --- /dev/null +++ b/docs/storage/driver/azure/blockid_test.go @@ -0,0 +1,74 @@ +package azure + +import ( + "math" + "testing" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +func Test_blockIdGenerator(t *testing.T) { + r := newBlockIDGenerator() + + for i := 1; i <= 10; i++ { + if expected := i - 1; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + if id := r.Generate(); id == "" { + t.Fatal("returned empty id") + } + if expected := i; len(r.pool) != expected { + t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) + } + } +} + +func Test_blockIdGenerator_Feed(t *testing.T) { + r := newBlockIDGenerator() + if expected := 0; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + + // feed empty list + blocks := azure.BlockListResponse{} + r.Feed(blocks) + if expected := 0; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + + // feed blocks + blocks = azure.BlockListResponse{ + CommittedBlocks: []azure.BlockResponse{ + {"1", 1}, + {"2", 2}, + }, + UncommittedBlocks: []azure.BlockResponse{ + {"3", 3}, + }} + r.Feed(blocks) + if expected := 3; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } + + // feed same block IDs with committed/uncommitted place changed + blocks = azure.BlockListResponse{ + CommittedBlocks: []azure.BlockResponse{ + {"3", 3}, + }, + UncommittedBlocks: []azure.BlockResponse{ + {"1", 1}, + }} + r.Feed(blocks) + if expected := 3; len(r.pool) != expected { + t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) + } +} + +func Test_toBlockId(t *testing.T) { + min := 0 + max := math.MaxInt64 + + if len(toBlockID(min)) != len(toBlockID(max)) { + t.Fatalf("different-sized blockIDs are returned") + } +} diff --git a/docs/storage/driver/azure/randomwriter.go b/docs/storage/driver/azure/randomwriter.go new file mode 100644 index 00000000..c89dd0a3 --- /dev/null +++ b/docs/storage/driver/azure/randomwriter.go @@ -0,0 +1,208 @@ +package azure + +import ( + "fmt" + "io" + "io/ioutil" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +// blockStorage is the interface required from a block storage service +// client implementation +type blockStorage interface { + CreateBlockBlob(container, blob string) error + GetBlob(container, blob string) (io.ReadCloser, error) + GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) + PutBlock(container, blob, blockID string, chunk []byte) error + GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) + PutBlockList(container, blob string, blocks []azure.Block) error +} + +// randomBlobWriter enables 
random access semantics on Azure block blobs
+// by allowing chunks of arbitrary length to be written at arbitrary offsets
+// within the blob. Normally, Azure Blob Storage does not support random
+// access semantics on block blobs; however, this writer downloads, splits and
+// re-uploads the overlapping blocks, discarding any block that is overwritten
+// entirely.
+type randomBlobWriter struct {
+    bs        blockStorage
+    blockSize int
+}
+
+func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter {
+    return randomBlobWriter{bs: bs, blockSize: blockSize}
+}
+
+// WriteBlobAt writes the given chunk to the specified position of an existing blob.
+// The offset must be less than or equal to the current size of the blob.
+func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) {
+    rand := newBlockIDGenerator()
+
+    blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted)
+    if err != nil {
+        return 0, err
+    }
+    rand.Feed(blocks) // load existing block IDs
+
+    // Check the write offset against the existing blob size
+    size := getBlobSize(blocks)
+    if offset < 0 || offset > size {
+        return 0, fmt.Errorf("wrong offset for Write: %v", offset)
+    }
+
+    // Upload the new chunk as blocks
+    blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand)
+    if err != nil {
+        return 0, err
+    }
+
+    // For non-append operations, existing blocks may need to be split
+    if offset != size {
+        // Split the block on the left end (if any)
+        leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand)
+        if err != nil {
+            return 0, err
+        }
+        blockList = append(leftBlocks, blockList...)
+
+        // Split the block on the right end (if any)
+        rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand)
+        if err != nil {
+            return 0, err
+        }
+        blockList = append(blockList, rightBlocks...)
+    } else {
+        // Use existing block list
+        var existingBlocks []azure.Block
+        for _, v := range blocks.CommittedBlocks {
+            existingBlocks = append(existingBlocks, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted})
+        }
+        blockList = append(existingBlocks, blockList...)
+    }
+    // Put block list
+    return nn, r.bs.PutBlockList(container, blob, blockList)
+}
+
+func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) {
+    blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted)
+    if err != nil {
+        return 0, err
+    }
+    return getBlobSize(blocks), nil
+}
+
+// writeChunkToBlocks writes the given chunk to one or more blocks within the
+// specified blob and returns their block representations. The blocks are not
+// yet committed.
+func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) {
+    var newBlocks []azure.Block
+    var nn int64
+
+    // Read chunks of at most size N except the last chunk to
+    // maximize block size and minimize block count.
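+    // Illustration: with blockSize = 3, a 10-byte chunk "AAABBBCCCD" is
+    // uploaded as four uncommitted blocks "AAA", "BBB", "CCC", "D", and nn
+    // reports 10; the blocks only become part of the blob once a later
+    // PutBlockList commits them (see TestRandomWriter_writeChunkToBlocks).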
+    buf := make([]byte, r.blockSize)
+    for {
+        n, err := io.ReadFull(chunk, buf)
+        if err == io.EOF {
+            break
+        }
+        if err != nil && err != io.ErrUnexpectedEOF {
+            return newBlocks, nn, err
+        }
+        nn += int64(n)
+        data := buf[:n]
+        blockID := rand.Generate()
+        if err := r.bs.PutBlock(container, blob, blockID, data); err != nil {
+            return newBlocks, nn, err
+        }
+        newBlocks = append(newBlocks, azure.Block{Id: blockID, Status: azure.BlockStatusUncommitted})
+    }
+    return newBlocks, nn, nil
+}
+
+// blocksLeftSide returns the blocks that are going to be at the left side of
+// the writeOffset: [0, writeOffset) by identifying blocks that will remain
+// the same and splitting blocks and re-uploading them as needed.
+func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) {
+    var left []azure.Block
+    bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll)
+    if err != nil {
+        return left, err
+    }
+
+    o := writeOffset
+    elapsed := int64(0)
+    for _, v := range bx.CommittedBlocks {
+        blkSize := int64(v.Size)
+        if o >= blkSize { // use existing block
+            left = append(left, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted})
+            o -= blkSize
+            elapsed += blkSize
+        } else if o > 0 { // current block needs to be split
+            start := elapsed
+            size := o
+            part, err := r.bs.GetSectionReader(container, blob, start, size)
+            if err != nil {
+                return left, err
+            }
+            newBlockID := rand.Generate()
+
+            data, err := ioutil.ReadAll(part)
+            if err != nil {
+                return left, err
+            }
+            if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
+                return left, err
+            }
+            left = append(left, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted})
+            break
+        }
+    }
+    return left, nil
+}
+
+// blocksRightSide returns the blocks that are going to be at the right side of
+// the written chunk: [writeOffset+size, +inf) by identifying blocks that will
+// remain the same and splitting blocks and re-uploading them as needed.
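+// Illustration: for a 13-byte blob "AAAAABBBBBCCC" stored as 5-byte blocks
+// {"AAAAA", "BBBBB", "CCC"}, a 1-byte write at offset 6 keeps "AAAAA" as-is
+// and re-uploads "B" (offset 5) as the left side, then re-uploads "BBB"
+// (offsets 7-9) and keeps "CCC" as the right side; WriteBlobAt commits
+// left + new chunk + right in that order.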
+func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { + var right []azure.Block + + bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) + if err != nil { + return nil, err + } + + re := writeOffset + chunkSize - 1 // right end of written chunk + var elapsed int64 + for _, v := range bx.CommittedBlocks { + var ( + bs = elapsed // left end of current block + be = elapsed + int64(v.Size) - 1 // right end of current block + ) + + if bs > re { // take the block as is + right = append(right, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) + } else if be > re { // current block needs to be splitted + part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) + if err != nil { + return right, err + } + newBlockID := rand.Generate() + + data, err := ioutil.ReadAll(part) + if err != nil { + return right, err + } + if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { + return right, err + } + right = append(right, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) + } + elapsed += int64(v.Size) + } + return right, nil +} + +func getBlobSize(blocks azure.BlockListResponse) int64 { + var n int64 + for _, v := range blocks.CommittedBlocks { + n += int64(v.Size) + } + return n +} diff --git a/docs/storage/driver/azure/randomwriter_test.go b/docs/storage/driver/azure/randomwriter_test.go new file mode 100644 index 00000000..5201e3b4 --- /dev/null +++ b/docs/storage/driver/azure/randomwriter_test.go @@ -0,0 +1,339 @@ +package azure + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "reflect" + "strings" + "testing" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +func TestRandomWriter_writeChunkToBlocks(t *testing.T) { + s := NewStorageSimulator() + rw := newRandomBlobWriter(&s, 3) + rand := newBlockIDGenerator() + c := []byte("AAABBBCCCD") + + if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand) + if err != nil { + t.Fatal(err) + } + if expected := int64(len(c)); nn != expected { + t.Fatalf("wrong nn:%v, expected:%v", nn, expected) + } + if expected := 4; len(bw) != expected { + t.Fatal("unexpected written block count") + } + + bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll) + if err != nil { + t.Fatal(err) + } + if expected := 0; len(bx.CommittedBlocks) != expected { + t.Fatal("unexpected committed block count") + } + if expected := 4; len(bx.UncommittedBlocks) != expected { + t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx) + } + + if err := rw.bs.PutBlockList("a", "b", bw); err != nil { + t.Fatal(err) + } + + r, err := rw.bs.GetBlob("a", "b") + if err != nil { + t.Fatal(err) + } + assertBlobContents(t, r, c) +} + +func TestRandomWriter_blocksLeftSide(t *testing.T) { + blob := "AAAAABBBBBCCC" + cases := []struct { + offset int64 + expectedBlob string + expectedPattern []azure.BlockStatus + }{ + {0, "", []azure.BlockStatus{}}, // write to beginning, discard all + {13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change + {1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1 + {5, "AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block + {6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, 
azure.BlockStatusUncommitted}}, // split the second block
+        {9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // write just after first block
+    }
+
+    for _, c := range cases {
+        s := NewStorageSimulator()
+        rw := newRandomBlobWriter(&s, 5)
+        rand := newBlockIDGenerator()
+
+        if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
+            t.Fatal(err)
+        }
+        bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand)
+        if err != nil {
+            t.Fatal(err)
+        }
+        if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
+            t.Fatal(err)
+        }
+        bx, err := rw.blocksLeftSide("a", "b", c.offset, rand)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        bs := []azure.BlockStatus{}
+        for _, v := range bx {
+            bs = append(bs, v.Status)
+        }
+
+        if !reflect.DeepEqual(bs, c.expectedPattern) {
+            t.Logf("Committed blocks %v", bw)
+            t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx)
+        }
+        if err := rw.bs.PutBlockList("a", "b", bx); err != nil {
+            t.Fatal(err)
+        }
+        r, err := rw.bs.GetBlob("a", "b")
+        if err != nil {
+            t.Fatal(err)
+        }
+        cout, err := ioutil.ReadAll(r)
+        if err != nil {
+            t.Fatal(err)
+        }
+        outBlob := string(cout)
+        if outBlob != c.expectedBlob {
+            t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob)
+        }
+    }
+}
+
+func TestRandomWriter_blocksRightSide(t *testing.T) {
+    blob := "AAAAABBBBBCCC"
+    cases := []struct {
+        offset          int64
+        size            int64
+        expectedBlob    string
+        expectedPattern []azure.BlockStatus
+    }{
+        {0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob
+        {0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block
+        {4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block
+        {1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains
+        {3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block
+        {10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite first byte of rightmost block
+        {11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index
+        {13, 20, "", []azure.BlockStatus{}}, // append to the end
+    }
+
+    for _, c := range cases {
+        s := NewStorageSimulator()
+        rw := newRandomBlobWriter(&s, 5)
+        rand := newBlockIDGenerator()
+
+        if err := rw.bs.CreateBlockBlob("a", "b"); err != nil {
+            t.Fatal(err)
+        }
+        bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand)
+        if err != nil {
+            t.Fatal(err)
+        }
+        if err := rw.bs.PutBlockList("a", "b", bw); err != nil {
+            t.Fatal(err)
+        }
+        bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        bs := []azure.BlockStatus{}
+        for _, v := range bx {
+            bs = append(bs, v.Status)
+        }
+
+        if !reflect.DeepEqual(bs, c.expectedPattern) {
+            t.Logf("Committed blocks %v", bw)
+            t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx)
+        }
+        if err := rw.bs.PutBlockList("a", "b", bx); err != nil {
+            t.Fatal(err)
+        }
+        r, err := rw.bs.GetBlob("a", "b")
+        if err != nil {
+            t.Fatal(err)
+        }
+        cout, err := ioutil.ReadAll(r)
+        if err != nil {
+            t.Fatal(err)
+        }
+        outBlob := string(cout)
+        if outBlob != c.expectedBlob {
+            t.Fatalf("For offset %v-size:%v: wrong blob 
contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) + } + } +} + +func TestRandomWriter_Write_NewBlob(t *testing.T) { + var ( + s = NewStorageSimulator() + rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks + blob = randomContents(1024 * 7) // 7 KB blob + ) + if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + + if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { + t.Fatal("expected error, got nil") + } + if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { + t.Fatal("expected error, got nil") + } + if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { + t.Fatal(err) + } else if expected := int64(len(blob)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if len(bx.CommittedBlocks) != 3 { + t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) + } + + // Replace first 512 bytes + leftChunk := randomContents(512) + blob = append(leftChunk, blob[512:]...) + if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(leftChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 4; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) + } + + // Replace last 512 bytes with 1024 bytes + rightChunk := randomContents(1024) + offset := int64(len(blob) - 512) + blob = append(blob[:offset], rightChunk...) + if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(rightChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 5; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) + } + + // Replace 2K-4K (overlaps 2 blocks from L/R) + newChunk := randomContents(1024 * 2) + offset = 1024 * 2 + blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) 
+ if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(newChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, blob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 6; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) + } + + // Replace the entire blob + newBlob := randomContents(1024 * 30) + if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil { + t.Fatal(err) + } else if expected := int64(len(newBlob)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := rw.bs.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, newBlob) + } + if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { + t.Fatal(err) + } else if expected := 10; len(bx.CommittedBlocks) != expected { + t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) + } else if expected, size := int64(1024*30), getBlobSize(bx); size != expected { + t.Fatalf("committed block size does not indicate blob size") + } +} + +func Test_getBlobSize(t *testing.T) { + // with some committed blocks + if expected, size := int64(151), getBlobSize(azure.BlockListResponse{ + CommittedBlocks: []azure.BlockResponse{ + {"A", 100}, + {"B", 50}, + {"C", 1}, + }, + UncommittedBlocks: []azure.BlockResponse{ + {"D", 200}, + }}); expected != size { + t.Fatalf("wrong blob size: %v, expected: %v", size, expected) + } + + // with no committed blocks + if expected, size := int64(0), getBlobSize(azure.BlockListResponse{ + UncommittedBlocks: []azure.BlockResponse{ + {"A", 100}, + {"B", 50}, + {"C", 1}, + {"D", 200}, + }}); expected != size { + t.Fatalf("wrong blob size: %v, expected: %v", size, expected) + } +} + +func assertBlobContents(t *testing.T, r io.Reader, expected []byte) { + out, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(out, expected) { + t.Fatalf("wrong blob contents. size: %v, expected: %v", len(out), len(expected)) + } +} + +func randomContents(length int64) []byte { + b := make([]byte, length) + for i := range b { + b[i] = byte(rand.Intn(2 << 8)) + } + return b +} diff --git a/docs/storage/driver/azure/zerofillwriter.go b/docs/storage/driver/azure/zerofillwriter.go new file mode 100644 index 00000000..095489d2 --- /dev/null +++ b/docs/storage/driver/azure/zerofillwriter.go @@ -0,0 +1,49 @@ +package azure + +import ( + "bytes" + "io" +) + +type blockBlobWriter interface { + GetSize(container, blob string) (int64, error) + WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) +} + +// zeroFillWriter enables writing to an offset outside a block blob's size +// by offering the chunk to the underlying writer as a contiguous data with +// the gap in between filled with NUL (zero) bytes. 
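+// Illustration: if the blob is currently 10 bytes long and Write is called
+// with offset 14, the writer prepends 14-10 = 4 NUL bytes to the caller's
+// chunk and delegates to WriteBlobAt at offset 10; the returned count
+// excludes the 4 padding bytes.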
+type zeroFillWriter struct { + blockBlobWriter +} + +func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { + w := zeroFillWriter{} + w.blockBlobWriter = b + return w +} + +// Write writes the given chunk to the specified existing blob even though +// offset is out of blob's size. The gaps are filled with zeros. Returned +// written number count does not include zeros written. +func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { + size, err := z.blockBlobWriter.GetSize(container, blob) + if err != nil { + return 0, err + } + + var reader io.Reader + var zeroPadding int64 + if offset <= size { + reader = chunk + } else { + zeroPadding = offset - size + offset = size // adjust offset to be the append index + zeros := bytes.NewReader(make([]byte, zeroPadding)) + reader = io.MultiReader(zeros, chunk) + } + + nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) + nn -= zeroPadding + return nn, err +} diff --git a/docs/storage/driver/azure/zerofillwriter_test.go b/docs/storage/driver/azure/zerofillwriter_test.go new file mode 100644 index 00000000..49361791 --- /dev/null +++ b/docs/storage/driver/azure/zerofillwriter_test.go @@ -0,0 +1,126 @@ +package azure + +import ( + "bytes" + "testing" +) + +func Test_zeroFillWrite_AppendNoGap(t *testing.T) { + s := NewStorageSimulator() + bw := newRandomBlobWriter(&s, 1024*1) + zw := newZeroFillWriter(&bw) + if err := s.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + + firstChunk := randomContents(1024*3 + 512) + if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(firstChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, firstChunk) + } + + secondChunk := randomContents(256) + if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(secondChunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, append(firstChunk, secondChunk...)) + } + +} + +func Test_zeroFillWrite_StartWithGap(t *testing.T) { + s := NewStorageSimulator() + bw := newRandomBlobWriter(&s, 1024*2) + zw := newZeroFillWriter(&bw) + if err := s.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + + chunk := randomContents(1024 * 5) + padding := int64(1024*2 + 256) + if nn, err := zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil { + t.Fatal(err) + } else if expected := int64(len(chunk)); expected != nn { + t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, append(make([]byte, padding), chunk...)) + } +} + +func Test_zeroFillWrite_AppendWithGap(t *testing.T) { + s := NewStorageSimulator() + bw := newRandomBlobWriter(&s, 1024*2) + zw := newZeroFillWriter(&bw) + if err := s.CreateBlockBlob("a", "b"); err != nil { + t.Fatal(err) + } + + firstChunk := randomContents(1024*3 + 512) + if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { + t.Fatal(err) + } + if out, err := s.GetBlob("a", "b"); err != nil { + t.Fatal(err) + } else { + assertBlobContents(t, out, firstChunk) + } + 
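+    // The write below lands 4 KB past the current end of the blob, so the
+    // zero-fill writer inserts 4 KB of NUL bytes ahead of secondChunk; nn
+    // still reports only len(secondChunk) bytes written.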
+    secondChunk := randomContents(256)
+    padding := int64(1024 * 4)
+    if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil {
+        t.Fatal(err)
+    } else if expected := int64(len(secondChunk)); expected != nn {
+        t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
+    }
+    if out, err := s.GetBlob("a", "b"); err != nil {
+        t.Fatal(err)
+    } else {
+        assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...))
+    }
+}
+
+func Test_zeroFillWrite_LiesWithinSize(t *testing.T) {
+    s := NewStorageSimulator()
+    bw := newRandomBlobWriter(&s, 1024*2)
+    zw := newZeroFillWriter(&bw)
+    if err := s.CreateBlockBlob("a", "b"); err != nil {
+        t.Fatal(err)
+    }
+
+    firstChunk := randomContents(1024 * 3)
+    if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil {
+        t.Fatal(err)
+    }
+    if out, err := s.GetBlob("a", "b"); err != nil {
+        t.Fatal(err)
+    } else {
+        assertBlobContents(t, out, firstChunk)
+    }
+
+    // in this case, zerofill won't be used
+    secondChunk := randomContents(256)
+    if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil {
+        t.Fatal(err)
+    } else if expected := int64(len(secondChunk)); expected != nn {
+        t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected)
+    }
+    if out, err := s.GetBlob("a", "b"); err != nil {
+        t.Fatal(err)
+    } else {
+        assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...))
+    }
+}
diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go
new file mode 100644
index 00000000..0365ba3c
--- /dev/null
+++ b/docs/storage/driver/base/base.go
@@ -0,0 +1,141 @@
+// Package base provides a base implementation of the storage driver that can
+// be used to implement common checks. The goal is to increase the amount of
+// code sharing.
+//
+// The canonical approach is to embed Base in the exported driver struct so
+// that calls are proxied through this implementation. First, declare the
+// internal driver, as follows:
+//
+//	type driver struct { ... internal ...}
+//
+// The resulting type should implement StorageDriver such that it can be the
+// target of a Base struct. The exported type can then be declared as follows:
+//
+//	type Driver struct {
+//		Base
+//	}
+//
+// Because Driver embeds Base, it effectively implements StorageDriver. If the
+// driver needs to intercept a call before it reaches Base, Driver should
+// implement that method itself; Base performs the common checks while the
+// internal driver implements the actual logic.
+//
+// To further shield the embed from other packages, it is recommended to
+// employ a private embed struct:
+//
+//	type baseEmbed struct {
+//		base.Base
+//	}
+//
+// Then, declare driver to embed baseEmbed, rather than Base directly:
+//
+//	type Driver struct {
+//		baseEmbed
+//	}
+//
+// The type now implements StorageDriver, proxying through Base, without
+// exporting an unnecessary field.
+package base
+
+import (
+    "io"
+
+    storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// Base provides a wrapper around a storagedriver implementation that provides
+// common path and bounds checking.
+type Base struct {
+    storagedriver.StorageDriver
+}
+
+// GetContent wraps GetContent of underlying storage driver.
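+// Every wrapper below follows the same shape: validate the inputs against
+// storagedriver.PathRegexp (and, for streams, a non-negative offset), return
+// a typed error such as InvalidPathError or InvalidOffsetError on failure,
+// and otherwise delegate to the embedded StorageDriver. Concrete drivers
+// therefore never see malformed paths.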
+func (base *Base) GetContent(path string) ([]byte, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.GetContent(path) +} + +// PutContent wraps PutContent of underlying storage driver. +func (base *Base) PutContent(path string, content []byte) error { + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.PutContent(path, content) +} + +// ReadStream wraps ReadStream of underlying storage driver. +func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.ReadStream(path, offset) +} + +// WriteStream wraps WriteStream of underlying storage driver. +func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + if !storagedriver.PathRegexp.MatchString(path) { + return 0, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.WriteStream(path, offset, reader) +} + +// Stat wraps Stat of underlying storage driver. +func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.Stat(path) +} + +// List wraps List of underlying storage driver. +func (base *Base) List(path string) ([]string, error) { + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.List(path) +} + +// Move wraps Move of underlying storage driver. +func (base *Base) Move(sourcePath string, destPath string) error { + if !storagedriver.PathRegexp.MatchString(sourcePath) { + return storagedriver.InvalidPathError{Path: sourcePath} + } else if !storagedriver.PathRegexp.MatchString(destPath) { + return storagedriver.InvalidPathError{Path: destPath} + } + + return base.StorageDriver.Move(sourcePath, destPath) +} + +// Delete wraps Delete of underlying storage driver. +func (base *Base) Delete(path string) error { + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.Delete(path) +} + +// URLFor wraps URLFor of underlying storage driver. 
+func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return "", storagedriver.InvalidPathError{Path: path} + } + + return base.StorageDriver.URLFor(path, options) +} diff --git a/docs/storage/driver/factory/factory.go b/docs/storage/driver/factory/factory.go new file mode 100644 index 00000000..66d160f3 --- /dev/null +++ b/docs/storage/driver/factory/factory.go @@ -0,0 +1,71 @@ +package factory + +import ( + "fmt" + + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +// driverFactories stores an internal mapping between storage driver names and their respective +// factories +var driverFactories = make(map[string]StorageDriverFactory) + +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces +// Storage drivers should call Register() with a factory to make the driver available by name +type StorageDriverFactory interface { + // Create returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. +func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and parameters +// To run in-process, the StorageDriverFactory must first be registered with the given name +// If no in-process drivers are found with the given name, this attempts to create an IPC driver +// If no in-process or external drivers are found, an InvalidStorageDriverError is returned +func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + return nil, InvalidStorageDriverError{name} + + // NOTE(stevvooe): We are disabling storagedriver ipc for now, as the + // server and client need to be updated for the changed API calls and + // there were some problems libchan hanging. We'll phase this + // functionality back in over the next few weeks. + + // No registered StorageDriverFactory found, try ipc + // driverClient, err := ipc.NewDriverClient(name, parameters) + // if err != nil { + // return nil, InvalidStorageDriverError{name} + // } + // err = driverClient.Start() + // if err != nil { + // return nil, err + // } + // return driverClient, nil + } + return driverFactory.Create(parameters) +} + +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver +type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/docs/storage/driver/fileinfo.go b/docs/storage/driver/fileinfo.go new file mode 100644 index 00000000..e5064029 --- /dev/null +++ b/docs/storage/driver/fileinfo.go @@ -0,0 +1,79 @@ +package driver + +import "time" + +// FileInfo returns information about a given path. 
Inspired by os.FileInfo, +// it elides the base name method for a full path instead. +type FileInfo interface { + // Path provides the full path of the target of this file info. + Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal +// should only be used by storagedriver implementations. They should moved to +// a "driver" package, similar to database/sql. + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// InternalFileInfo. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. +type FileInfoInternal struct { + FileInfoFields +} + +var _ FileInfo = FileInfoInternal{} +var _ FileInfo = &FileInfoInternal{} + +// Path provides the full path of the target of this file info. +func (fi FileInfoInternal) Path() string { + return fi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi FileInfoInternal) Size() int64 { + return fi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi FileInfoInternal) ModTime() time.Time { + return fi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. +func (fi FileInfoInternal) IsDir() bool { + return fi.FileInfoFields.IsDir +} diff --git a/docs/storage/driver/filesystem/README.md b/docs/storage/driver/filesystem/README.md new file mode 100644 index 00000000..ba3ea564 --- /dev/null +++ b/docs/storage/driver/filesystem/README.md @@ -0,0 +1,8 @@ +Docker-Registry Filesystem Storage Driver +========================================= + +An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem. + +## Parameters + +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/tmp/registry/storage`. 
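To see how the factory and a concrete driver fit together, here is a minimal usage sketch (illustrative, not part of the patch; it assumes the driver registers itself under the name "filesystem" in its init function, as the next file shows, and that a simple path such as "/greeting" satisfies the storage driver's path rules):

package main

import (
    "fmt"

    "github.com/docker/distribution/registry/storage/driver/factory"
    _ "github.com/docker/distribution/registry/storage/driver/filesystem" // registers "filesystem"
)

func main() {
    // The blank import above runs the filesystem package's init(), which
    // registers its StorageDriverFactory; Create looks it up by name.
    d, err := factory.Create("filesystem", map[string]interface{}{
        "rootdirectory": "/tmp/registry/storage",
    })
    if err != nil {
        panic(err)
    }

    if err := d.PutContent("/greeting", []byte("hello")); err != nil {
        panic(err)
    }
    p, err := d.GetContent("/greeting")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(p)) // hello
}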
diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go new file mode 100644 index 00000000..0e5aea75 --- /dev/null +++ b/docs/storage/driver/filesystem/driver.go @@ -0,0 +1,286 @@ +package filesystem + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "time" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "filesystem" +const defaultRootDirectory = "/tmp/registry/storage" + +func init() { + factory.Register(driverName, &filesystemDriverFactory{}) +} + +// filesystemDriverFactory implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters), nil +} + +type driver struct { + rootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local +// filesystem. All provided paths will be subpaths of the RootDirectory. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Optional Parameters: +// - rootdirectory +func FromParameters(parameters map[string]interface{}) *Driver { + var rootDirectory = defaultRootDirectory + if parameters != nil { + rootDir, ok := parameters["rootdirectory"] + if ok { + rootDirectory = fmt.Sprint(rootDir) + } + } + return New(rootDirectory) +} + +// New constructs a new Driver with a given rootDirectory +func New(rootDirectory string) *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + rootDirectory: rootDirectory, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(path string) ([]byte, error) { + rc, err := d.ReadStream(path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(subPath string, contents []byte) error { + if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return os.Truncate(d.fullPath(subPath), int64(len(contents))) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return nil, err + } + + seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < int64(offset) { + file.Close() + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return file, nil +} + +// WriteStream stores the contents of the provided io.Reader at a location +// designated by the given path. +func (d *driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) { + // TODO(stevvooe): This needs to be a requirement. 
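+    // Note: PutContent above is built on this method: it writes the full
+    // contents at offset 0 and then truncates the file to the new length so
+    // that stale bytes past the end are dropped.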
+ // if !path.IsAbs(subPath) { + // return fmt.Errorf("absolute path required: %q", subPath) + // } + + fullPath := d.fullPath(subPath) + parentDir := path.Dir(fullPath) + if err := os.MkdirAll(parentDir, 0755); err != nil { + return 0, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + // TODO(stevvooe): A few missing conditions in storage driver: + // 1. What if the path is already a directory? + // 2. Should number 1 be exposed explicitly in storagedriver? + // 2. Can this path not exist, even if we create above? + return 0, err + } + defer fp.Close() + + nn, err = fp.Seek(offset, os.SEEK_SET) + if err != nil { + return 0, err + } + + if nn != offset { + return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) + } + + return io.Copy(fp, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) + + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err + } + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(subPath string) ([]string, error) { + if subPath[len(subPath)-1] != '/' { + subPath += "/" + } + fullPath := d.fullPath(subPath) + + dir, err := os.Open(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + return nil, err + } + + defer dir.Close() + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(subPath, fileName)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(sourcePath string, destPath string) error { + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { + return err + } + + err := os.Rename(source, dest) + return err +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(subPath string) error { + fullPath := d.fullPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{Path: subPath} + } + + err = os.RemoveAll(fullPath) + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. 
+func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. +func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} diff --git a/docs/storage/driver/filesystem/driver_test.go b/docs/storage/driver/filesystem/driver_test.go new file mode 100644 index 00000000..8572de16 --- /dev/null +++ b/docs/storage/driver/filesystem/driver_test.go @@ -0,0 +1,29 @@ +package filesystem + +import ( + "io/ioutil" + "os" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { + return New(root), nil + }, testsuites.NeverSkip) + + // BUG(stevvooe): IPC is broken so we're disabling for now. Will revisit later. + // testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": root}, testsuites.NeverSkip) +} diff --git a/docs/storage/driver/inmemory/README.md b/docs/storage/driver/inmemory/README.md new file mode 100644 index 00000000..2447e2ca --- /dev/null +++ b/docs/storage/driver/inmemory/README.md @@ -0,0 +1,10 @@ +Docker-Registry In-Memory Storage Driver +========================================= + +An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage. + +**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing. + +## Parameters + +None diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go new file mode 100644 index 00000000..f2c9c3ff --- /dev/null +++ b/docs/storage/driver/inmemory/driver.go @@ -0,0 +1,257 @@ +package inmemory + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "inmemory" + +func init() { + factory.Register(driverName, &inMemoryDriverFactory{}) +} + +// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. +type inMemoryDriverFactory struct{} + +func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +type driver struct { + root *dir + mutex sync.RWMutex +} + +// baseEmbed allows us to hide the Base embed. +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by a local map. +// Intended solely for example and testing purposes. +type Driver struct { + baseEmbed // embedded, hidden base driver. 
+} + +var _ storagedriver.StorageDriver = &Driver{} + +// New constructs a new Driver. +func New() *Driver { + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: &driver{ + root: &dir{ + common: common{ + p: "/", + mod: time.Now(), + }, + }, + }, + }, + }, + } +} + +// Implement the storagedriver.StorageDriver interface. + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(path string) ([]byte, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + rc, err := d.ReadStream(path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + return ioutil.ReadAll(rc) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(p string, contents []byte) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + f, err := d.root.mkfile(p) + if err != nil { + // TODO(stevvooe): Again, we need to clarify when this is not a + // directory in StorageDriver API. + return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + if offset < 0 { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + path = normalize(path) + found := d.root.find(path) + + if found.path() != path { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil +} + +// WriteStream stores the contents of the provided io.ReadCloser at a location +// designated by the given path. +func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { + d.mutex.Lock() + defer d.mutex.Unlock() + + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := normalize(path) + + f, err := d.root.mkfile(normalized) + if err != nil { + return 0, fmt.Errorf("not a file") + } + + // Unlock while we are reading from the source, in case we are reading + // from the same mfs instance. This can be fixed by a more granular + // locking model. + d.mutex.Unlock() + d.mutex.RLock() // Take the readlock to block other writers. + var buf bytes.Buffer + + nn, err = buf.ReadFrom(reader) + if err != nil { + // TODO(stevvooe): This condition is odd and we may need to clarify: + // we've read nn bytes from reader but have written nothing to the + // backend. What is the correct return value? Really, the caller needs + // to know that the reader has been advanced and reattempting the + // operation is incorrect. + d.mutex.RUnlock() + d.mutex.Lock() + return nn, err + } + + d.mutex.RUnlock() + d.mutex.Lock() + f.WriteAt(buf.Bytes(), offset) + return nn, err +} + +// Stat returns info about the provided path. 
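+// Note: WriteStream above briefly swaps its write lock for a read lock while
+// buffering from the caller's reader; this blocks other writers but lets
+// concurrent readers proceed during the copy. The write lock is re-acquired
+// before the buffered bytes are applied with WriteAt.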
+func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + found := d.root.find(path) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *driver) List(path string) ([]string, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + + normalized := normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... + } + + entries, err := found.(*dir).list(normalized) + + if err != nil { + switch err { + case errNotExists: + return nil, storagedriver.PathNotFoundError{Path: path} + case errIsNotDir: + return nil, fmt.Errorf("not a directory") + default: + return nil, err + } + } + + return entries, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(sourcePath string, destPath string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) + + err := d.root.move(normalizedSrc, normalizedDst) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: destPath} + default: + return err + } +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(path string) error { + d.mutex.Lock() + defer d.mutex.Unlock() + + normalized := normalize(path) + + err := d.root.delete(normalized) + switch err { + case errNotExists: + return storagedriver.PathNotFoundError{Path: path} + default: + return err + } +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} diff --git a/docs/storage/driver/inmemory/driver_test.go b/docs/storage/driver/inmemory/driver_test.go new file mode 100644 index 00000000..a02ff23e --- /dev/null +++ b/docs/storage/driver/inmemory/driver_test.go @@ -0,0 +1,24 @@ +package inmemory + +import ( + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +func init() { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(), nil + } + testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip) + + // BUG(stevvooe): Disable flaky IPC tests for now when we can troubleshoot + // the problems with libchan. 
+	// testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip)
+}
diff --git a/docs/storage/driver/inmemory/mfs.go b/docs/storage/driver/inmemory/mfs.go
new file mode 100644
index 00000000..2bf859bc
--- /dev/null
+++ b/docs/storage/driver/inmemory/mfs.go
@@ -0,0 +1,333 @@
+package inmemory
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strings"
+	"time"
+)
+
+var (
+	errExists    = fmt.Errorf("exists")
+	errNotExists = fmt.Errorf("notexists")
+	errIsNotDir  = fmt.Errorf("notdir")
+	errIsDir     = fmt.Errorf("isdir")
+)
+
+type node interface {
+	name() string
+	path() string
+	isdir() bool
+	modtime() time.Time
+}
+
+// dir is the central type for the memory-based storagedriver. All operations
+// are dispatched from a root dir.
+type dir struct {
+	common
+
+	// TODO(stevvooe): Use sorted slice + search.
+	children map[string]node
+}
+
+var _ node = &dir{}
+
+func (d *dir) isdir() bool {
+	return true
+}
+
+// add places the node n into dir d.
+func (d *dir) add(n node) {
+	if d.children == nil {
+		d.children = make(map[string]node)
+	}
+
+	d.children[n.name()] = n
+	d.mod = time.Now()
+}
+
+// find searches for the node at path q in dir. If the node is found, it is
+// returned and the returned (node).path() will match q. If the node is not
+// found, the closest existing parent is returned instead.
+func (d *dir) find(q string) node {
+	q = strings.Trim(q, "/")
+	i := strings.Index(q, "/")
+
+	if q == "" {
+		return d
+	}
+
+	if i == 0 {
+		panic("shouldn't happen, no root paths")
+	}
+
+	var component string
+	if i < 0 {
+		// No more path components
+		component = q
+	} else {
+		component = q[:i]
+	}
+
+	child, ok := d.children[component]
+	if !ok {
+		// Node was not found. Return the current node, the closest existing
+		// parent.
+		return d
+	}
+
+	if child.isdir() {
+		// traverse down!
+		q = q[i+1:]
+		return child.(*dir).find(q)
+	}
+
+	return child
+}
+
+func (d *dir) list(p string) ([]string, error) {
+	n := d.find(p)
+
+	if n.path() != p {
+		return nil, errNotExists
+	}
+
+	if !n.isdir() {
+		return nil, errIsNotDir
+	}
+
+	var children []string
+	for _, child := range n.(*dir).children {
+		children = append(children, child.path())
+	}
+
+	sort.Strings(children)
+	return children, nil
+}
+
+// mkfile returns the file at path p, creating it if it does not already
+// exist. It returns an error if the path exists and is a directory.
+// Essentially, this is open or create.
+func (d *dir) mkfile(p string) (*file, error) {
+	n := d.find(p)
+	if n.path() == p {
+		if n.isdir() {
+			return nil, errIsDir
+		}
+
+		return n.(*file), nil
+	}
+
+	dirpath, filename := path.Split(p)
+	// Make any non-existent directories
+	n, err := d.mkdirs(dirpath)
+	if err != nil {
+		return nil, err
+	}
+
+	dd := n.(*dir)
+	n = &file{
+		common: common{
+			p:   path.Join(dd.path(), filename),
+			mod: time.Now(),
+		},
+	}
+
+	dd.add(n)
+	return n.(*file), nil
+}
+
+// mkdirs creates any missing directory entries in p and returns the result.
+func (d *dir) mkdirs(p string) (*dir, error) {
+	p = normalize(p)
+
+	n := d.find(p)
+
+	if !n.isdir() {
+		// Found something there
+		return nil, errIsNotDir
+	}
+
+	if n.path() == p {
+		return n.(*dir), nil
+	}
+
+	dd := n.(*dir)
+
+	relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/")
+
+	if relative == "" {
+		return dd, nil
+	}
+
+	components := strings.Split(relative, "/")
+	for _, component := range components {
+		d, err := dd.mkdir(component)
+
+		if err != nil {
+			// This should actually never happen, since there are no children.
+ return nil, err + } + dd = d + } + + return dd, nil +} + +// mkdir creates a child directory under d with the given name. +func (d *dir) mkdir(name string) (*dir, error) { + if name == "" { + return nil, fmt.Errorf("invalid dirname") + } + + _, ok := d.children[name] + if ok { + return nil, errExists + } + + child := &dir{ + common: common{ + p: path.Join(d.path(), name), + mod: time.Now(), + }, + } + d.add(child) + d.mod = time.Now() + + return child, nil +} + +func (d *dir) move(src, dst string) error { + dstDirname, _ := path.Split(dst) + + dp, err := d.mkdirs(dstDirname) + if err != nil { + return err + } + + srcDirname, srcFilename := path.Split(src) + sp := d.find(srcDirname) + + if normalize(srcDirname) != normalize(sp.path()) { + return errNotExists + } + + s, ok := sp.(*dir).children[srcFilename] + if !ok { + return errNotExists + } + + delete(sp.(*dir).children, srcFilename) + + switch n := s.(type) { + case *dir: + n.p = dst + case *file: + n.p = dst + } + + dp.add(s) + + return nil +} + +func (d *dir) delete(p string) error { + dirname, filename := path.Split(p) + parent := d.find(dirname) + + if normalize(dirname) != normalize(parent.path()) { + return errNotExists + } + + if _, ok := parent.(*dir).children[filename]; !ok { + return errNotExists + } + + delete(parent.(*dir).children, filename) + return nil +} + +// dump outputs a primitive directory structure to stdout. +func (d *dir) dump(indent string) { + fmt.Println(indent, d.name()+"/") + + for _, child := range d.children { + if child.isdir() { + child.(*dir).dump(indent + "\t") + } else { + fmt.Println(indent, child.name()) + } + + } +} + +func (d *dir) String() string { + return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) +} + +// file stores actual data in the fs tree. It acts like an open, seekable file +// where operations are conducted through ReadAt and WriteAt. Use it with +// SectionReader for the best effect. +type file struct { + common + data []byte +} + +var _ node = &file{} + +func (f *file) isdir() bool { + return false +} + +func (f *file) truncate() { + f.data = f.data[:0] +} + +func (f *file) sectionReader(offset int64) io.Reader { + return io.NewSectionReader(f, offset, int64(len(f.data))-offset) +} + +func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { + return copy(p, f.data[offset:]), nil +} + +func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { + off := int(offset) + if cap(f.data) < off+len(p) { + data := make([]byte, len(f.data), off+len(p)) + copy(data, f.data) + f.data = data + } + + f.mod = time.Now() + f.data = f.data[:off+len(p)] + + return copy(f.data[off:off+len(p)], p), nil +} + +func (f *file) String() string { + return fmt.Sprintf("&file{path: %q}", f.p) +} + +// common provides shared fields and methods for node implementations. 
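file.WriteAt grows its backing slice with an explicit make/copy/reslice rather than append, so a write at an offset beyond the current length zero-fills the gap. A standalone sketch of the same idiom:

```go
package main

import "fmt"

// writeAt writes p into buf at off, growing buf as needed; any gap between
// the old length and off is left as zero bytes. Mirroring (*file).WriteAt
// above, the slice is cut to exactly off+len(p), so bytes past the write are
// dropped.
func writeAt(buf, p []byte, off int) []byte {
	if cap(buf) < off+len(p) {
		grown := make([]byte, len(buf), off+len(p))
		copy(grown, buf)
		buf = grown
	}
	buf = buf[:off+len(p)]
	copy(buf[off:], p)
	return buf
}

func main() {
	b := []byte("hello")
	b = writeAt(b, []byte(" world"), 5)
	fmt.Printf("%s\n", b) // hello world
}
```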
+type common struct { + p string + mod time.Time +} + +func (c *common) name() string { + _, name := path.Split(c.p) + return name +} + +func (c *common) path() string { + return c.p +} + +func (c *common) modtime() time.Time { + return c.mod +} + +func normalize(p string) string { + return "/" + strings.Trim(p, "/") +} diff --git a/docs/storage/driver/ipc/client.go b/docs/storage/driver/ipc/client.go new file mode 100644 index 00000000..daa823d7 --- /dev/null +++ b/docs/storage/driver/ipc/client.go @@ -0,0 +1,454 @@ +// +build ignore + +package ipc + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "syscall" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libchan" + "github.com/docker/libchan/spdy" +) + +// StorageDriverExecutablePrefix is the prefix which the IPC storage driver +// loader expects driver executables to begin with. For example, the s3 driver +// should be named "registry-storagedriver-s3". +const StorageDriverExecutablePrefix = "registry-storagedriver-" + +// StorageDriverClient is a storagedriver.StorageDriver implementation using a +// managed child process communicating over IPC using libchan with a unix domain +// socket +type StorageDriverClient struct { + subprocess *exec.Cmd + exitChan chan error + exitErr error + stopChan chan struct{} + socket *os.File + transport *spdy.Transport + sender libchan.Sender + version storagedriver.Version +} + +// NewDriverClient constructs a new out-of-process storage driver using the +// driver name and configuration parameters +// A user must call Start on this driver client before remote method calls can +// be made +// +// Looks for drivers in the following locations in order: +// - Storage drivers directory (to be determined, yet not implemented) +// - $GOPATH/bin +// - $PATH +func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) { + paramsBytes, err := json.Marshal(parameters) + if err != nil { + return nil, err + } + + driverExecName := StorageDriverExecutablePrefix + name + driverPath, err := exec.LookPath(driverExecName) + if err != nil { + return nil, err + } + + command := exec.Command(driverPath, string(paramsBytes)) + + return &StorageDriverClient{ + subprocess: command, + }, nil +} + +// Start starts the designated child process storage driver and binds a socket +// to this process for IPC method calls +func (driver *StorageDriverClient) Start() error { + driver.exitErr = nil + driver.exitChan = make(chan error) + driver.stopChan = make(chan struct{}) + + fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) + if err != nil { + return err + } + + childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket") + driver.socket = os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") + + driver.subprocess.Stdout = os.Stdout + driver.subprocess.Stderr = os.Stderr + driver.subprocess.ExtraFiles = []*os.File{childSocket} + + if err = driver.subprocess.Start(); err != nil { + driver.Stop() + return err + } + + go driver.handleSubprocessExit() + + if err = childSocket.Close(); err != nil { + driver.Stop() + return err + } + + connection, err := net.FileConn(driver.socket) + if err != nil { + driver.Stop() + return err + } + driver.transport, err = spdy.NewClientTransport(connection) + if err != nil { + driver.Stop() + return err + } + driver.sender, err = driver.transport.NewSendChannel() + if err != nil { + driver.Stop() + return err + } + + // Check the 
driver's version to determine compatibility + receiver, remoteSender := libchan.Pipe() + err = driver.sender.Send(&Request{Type: "Version", ResponseChannel: remoteSender}) + if err != nil { + driver.Stop() + return err + } + + var response VersionResponse + err = receiver.Receive(&response) + if err != nil { + driver.Stop() + return err + } + + if response.Error != nil { + return response.Error.Unwrap() + } + + driver.version = response.Version + + if driver.version.Major() != storagedriver.CurrentVersion.Major() || driver.version.Minor() > storagedriver.CurrentVersion.Minor() { + return IncompatibleVersionError{driver.version} + } + + return nil +} + +// Stop stops the child process storage driver +// storagedriver.StorageDriver methods called after Stop will fail +func (driver *StorageDriverClient) Stop() error { + var closeSenderErr, closeTransportErr, closeSocketErr, killErr error + + if driver.sender != nil { + closeSenderErr = driver.sender.Close() + } + if driver.transport != nil { + closeTransportErr = driver.transport.Close() + } + if driver.socket != nil { + closeSocketErr = driver.socket.Close() + } + if driver.subprocess != nil { + killErr = driver.subprocess.Process.Kill() + } + if driver.stopChan != nil { + close(driver.stopChan) + } + + if closeSenderErr != nil { + return closeSenderErr + } else if closeTransportErr != nil { + return closeTransportErr + } else if closeSocketErr != nil { + return closeSocketErr + } + + return killErr +} + +// Implement the storagedriver.StorageDriver interface over IPC + +// GetContent retrieves the content stored at "path" as a []byte. +func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { + if err := driver.exited(); err != nil { + return nil, err + } + + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path} + err := driver.sender.Send(&Request{Type: "GetContent", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return nil, err + } + + response := new(ReadStreamResponse) + err = driver.receiveResponse(receiver, response) + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, response.Error.Unwrap() + } + + defer response.Reader.Close() + contents, err := ioutil.ReadAll(response.Reader) + if err != nil { + return nil, err + } + return contents, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (driver *StorageDriverClient) PutContent(path string, contents []byte) error { + if err := driver.exited(); err != nil { + return err + } + + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))} + err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return err + } + + response := new(WriteStreamResponse) + err = driver.receiveResponse(receiver, response) + if err != nil { + return err + } + + if response.Error != nil { + return response.Error.Unwrap() + } + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
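Start's handshake (just above) accepts a child driver only when the major versions are equal and the child's minor version is not newer than the parent's. The same rule restated as a standalone predicate over plain major/minor pairs:

```go
package main

import "fmt"

// compatible reports whether a remote driver at version r can serve a client
// at version c: equal major versions, and a remote minor no newer than ours.
func compatible(c, r [2]uint) bool {
	return r[0] == c[0] && r[1] <= c[1]
}

func main() {
	current := [2]uint{0, 1}
	fmt.Println(compatible(current, [2]uint{0, 1})) // true
	fmt.Println(compatible(current, [2]uint{0, 2})) // false: remote minor is newer
	fmt.Println(compatible(current, [2]uint{1, 1})) // false: major mismatch
}
```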
+func (driver *StorageDriverClient) ReadStream(path string, offset int64) (io.ReadCloser, error) {
+	if err := driver.exited(); err != nil {
+		return nil, err
+	}
+
+	receiver, remoteSender := libchan.Pipe()
+	params := map[string]interface{}{"Path": path, "Offset": offset}
+	err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender})
+	if err != nil {
+		return nil, err
+	}
+
+	response := new(ReadStreamResponse)
+	err = driver.receiveResponse(receiver, response)
+	if err != nil {
+		return nil, err
+	}
+
+	if response.Error != nil {
+		return nil, response.Error.Unwrap()
+	}
+
+	return response.Reader, nil
+}
+
+// WriteStream stores the contents of the provided io.ReadCloser at a location
+// designated by the given path.
+func (driver *StorageDriverClient) WriteStream(path string, offset, size int64, reader io.ReadCloser) error {
+	if err := driver.exited(); err != nil {
+		return err
+	}
+
+	receiver, remoteSender := libchan.Pipe()
+	params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": reader}
+	err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender})
+	if err != nil {
+		return err
+	}
+
+	response := new(WriteStreamResponse)
+	err = driver.receiveResponse(receiver, response)
+	if err != nil {
+		return err
+	}
+
+	if response.Error != nil {
+		return response.Error.Unwrap()
+	}
+
+	return nil
+}
+
+// CurrentSize retrieves the current size in bytes of the object at the given
+// path.
+func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) {
+	if err := driver.exited(); err != nil {
+		return 0, err
+	}
+
+	receiver, remoteSender := libchan.Pipe()
+	params := map[string]interface{}{"Path": path}
+	err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender})
+	if err != nil {
+		return 0, err
+	}
+
+	response := new(CurrentSizeResponse)
+	err = driver.receiveResponse(receiver, response)
+	if err != nil {
+		return 0, err
+	}
+
+	if response.Error != nil {
+		return 0, response.Error.Unwrap()
+	}
+
+	return response.Position, nil
+}
+
+// List returns a list of the objects that are direct descendants of the given
+// path.
+func (driver *StorageDriverClient) List(path string) ([]string, error) {
+	if err := driver.exited(); err != nil {
+		return nil, err
+	}
+
+	receiver, remoteSender := libchan.Pipe()
+	params := map[string]interface{}{"Path": path}
+	err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender})
+	if err != nil {
+		return nil, err
+	}
+
+	response := new(ListResponse)
+	err = driver.receiveResponse(receiver, response)
+	if err != nil {
+		return nil, err
+	}
+
+	if response.Error != nil {
+		return nil, response.Error.Unwrap()
+	}
+
+	return response.Keys, nil
+}
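Each client method repeats the same round trip: build a libchan pipe, send a Request naming the method and carrying the response end of the pipe, then decode a typed response. A hypothetical helper (not part of this patch) showing the shared shape, assuming the surrounding package's types:

```go
// call performs one IPC round trip: it sends a Request of the given type with
// params and decodes the reply into response via receiveResponse.
func call(driver *StorageDriverClient, reqType string, params map[string]interface{}, response interface{}) error {
	if err := driver.exited(); err != nil {
		return err
	}

	receiver, remoteSender := libchan.Pipe()
	err := driver.sender.Send(&Request{Type: reqType, Parameters: params, ResponseChannel: remoteSender})
	if err != nil {
		return err
	}

	return driver.receiveResponse(receiver, response)
}
```

With such a helper, Delete would reduce to sending "Delete" with a Path parameter and inspecting DeleteResponse.Error.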
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error {
+	if err := driver.exited(); err != nil {
+		return err
+	}
+
+	receiver, remoteSender := libchan.Pipe()
+	params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath}
+	err := driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender})
+	if err != nil {
+		return err
+	}
+
+	response := new(MoveResponse)
+	err = driver.receiveResponse(receiver, response)
+	if err != nil {
+		return err
+	}
+
+	if response.Error != nil {
+		return response.Error.Unwrap()
+	}
+
+	return nil
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (driver *StorageDriverClient) Delete(path string) error {
+	if err := driver.exited(); err != nil {
+		return err
+	}
+
+	receiver, remoteSender := libchan.Pipe()
+	params := map[string]interface{}{"Path": path}
+	err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender})
+	if err != nil {
+		return err
+	}
+
+	response := new(DeleteResponse)
+	err = driver.receiveResponse(receiver, response)
+	if err != nil {
+		return err
+	}
+
+	if response.Error != nil {
+		return response.Error.Unwrap()
+	}
+
+	return nil
+}
+
+// handleSubprocessExit populates the exit channel until we have explicitly
+// stopped the storage driver subprocess.
+// Requests can select on driver.exitChan and response receiving, and will not
+// hang if the process exits.
+func (driver *StorageDriverClient) handleSubprocessExit() {
+	exitErr := driver.subprocess.Wait()
+	if exitErr == nil {
+		exitErr = fmt.Errorf("Storage driver subprocess already exited cleanly")
+	} else {
+		exitErr = fmt.Errorf("Storage driver subprocess exited with error: %s", exitErr)
+	}
+
+	driver.exitErr = exitErr
+
+	for {
+		select {
+		case driver.exitChan <- exitErr:
+		case <-driver.stopChan:
+			close(driver.exitChan)
+			return
+		}
+	}
+}
+
+// receiveResponse populates the response value with the next result from the
+// given receiver, or returns an error if receiving failed or the driver has
+// stopped.
+func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error {
+	receiveChan := make(chan error, 1)
+	go func(receiver libchan.Receiver, receiveChan chan<- error) {
+		receiveChan <- receiver.Receive(response)
+	}(receiver, receiveChan)
+
+	var err error
+	var ok bool
+	select {
+	case err = <-receiveChan:
+	case err, ok = <-driver.exitChan:
+		if !ok {
+			err = driver.exitErr
+		}
+	}
+
+	return err
+}
+
+// exited returns an exit error if the driver has exited or nil otherwise.
+func (driver *StorageDriverClient) exited() error {
+	select {
+	case err, ok := <-driver.exitChan:
+		if !ok {
+			return driver.exitErr
+		}
+		return err
+	default:
+		return nil
+	}
+}
diff --git a/docs/storage/driver/ipc/ipc.go b/docs/storage/driver/ipc/ipc.go
new file mode 100644
index 00000000..dabb834d
--- /dev/null
+++ b/docs/storage/driver/ipc/ipc.go
@@ -0,0 +1,148 @@
+// +build ignore
+
+package ipc
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/libchan"
+)
+
+// StorageDriver is the interface which IPC storage drivers must implement. As external storage
+// drivers may be defined to use a different version of the storagedriver.StorageDriver interface,
+// we use an additional version check to determine compatibility.
+type StorageDriver interface { + // Version returns the storagedriver.StorageDriver interface version which this storage driver + // implements, which is used to determine driver compatibility + Version() (storagedriver.Version, error) +} + +// IncompatibleVersionError is returned when a storage driver is using an incompatible version of +// the storagedriver.StorageDriver api +type IncompatibleVersionError struct { + version storagedriver.Version +} + +func (e IncompatibleVersionError) Error() string { + return fmt.Sprintf("Incompatible storage driver version: %s", e.version) +} + +// Request defines a remote method call request +// A return value struct is to be sent over the ResponseChannel +type Request struct { + Type string `codec:",omitempty"` + Parameters map[string]interface{} `codec:",omitempty"` + ResponseChannel libchan.Sender `codec:",omitempty"` +} + +// ResponseError is a serializable error type. +// The Type and Parameters may be used to reconstruct the same error on the +// client side, falling back to using the Type and Message if this cannot be +// done. +type ResponseError struct { + Type string `codec:",omitempty"` + Message string `codec:",omitempty"` + Parameters map[string]interface{} `codec:",omitempty"` +} + +// WrapError wraps an error in a serializable struct containing the error's type +// and message. +func WrapError(err error) *ResponseError { + if err == nil { + return nil + } + v := reflect.ValueOf(err) + re := ResponseError{ + Type: v.Type().String(), + Message: err.Error(), + } + + if v.Kind() == reflect.Struct { + re.Parameters = make(map[string]interface{}) + for i := 0; i < v.NumField(); i++ { + field := v.Type().Field(i) + re.Parameters[field.Name] = v.Field(i).Interface() + } + } + return &re +} + +// Unwrap returns the underlying error if it can be reconstructed, or the +// original ResponseError otherwise. 
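WrapError (above) uses reflection to flatten a concrete error into its dynamic type name, message, and exported fields so it can cross the process boundary; Unwrap, next, reverses the process for the error types it recognizes. A self-contained sketch of the reflection step, using a stand-in error type:

```go
package main

import (
	"fmt"
	"reflect"
)

// PathNotFoundError stands in for the storage driver's error type.
type PathNotFoundError struct{ Path string }

func (e PathNotFoundError) Error() string { return "path not found: " + e.Path }

func main() {
	err := PathNotFoundError{Path: "/missing"}
	v := reflect.ValueOf(err)

	// Capture the dynamic type name and exported fields, as WrapError does.
	fmt.Println(v.Type().String()) // main.PathNotFoundError
	for i := 0; i < v.NumField(); i++ {
		fmt.Println(v.Type().Field(i).Name, "=", v.Field(i).Interface())
	}
}
```

Note that reflect reports the declared package name, so the "storagedriver." prefixes matched in Unwrap's switch below only line up if those strings stay in sync with the package name actually compiled.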
+func (err *ResponseError) Unwrap() error { + var errVal reflect.Value + var zeroVal reflect.Value + + switch err.Type { + case "storagedriver.PathNotFoundError": + errVal = reflect.ValueOf(&storagedriver.PathNotFoundError{}) + case "storagedriver.InvalidOffsetError": + errVal = reflect.ValueOf(&storagedriver.InvalidOffsetError{}) + } + if errVal == zeroVal { + return err + } + + for k, v := range err.Parameters { + fieldVal := errVal.Elem().FieldByName(k) + if fieldVal == zeroVal { + return err + } + fieldVal.Set(reflect.ValueOf(v)) + } + + if unwrapped, ok := errVal.Elem().Interface().(error); ok { + return unwrapped + } + + return err + +} + +func (err *ResponseError) Error() string { + return fmt.Sprintf("%s: %s", err.Type, err.Message) +} + +// IPC method call response object definitions + +// VersionResponse is a response for a Version request +type VersionResponse struct { + Version storagedriver.Version `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` +} + +// ReadStreamResponse is a response for a ReadStream request +type ReadStreamResponse struct { + Reader io.ReadCloser `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` +} + +// WriteStreamResponse is a response for a WriteStream request +type WriteStreamResponse struct { + Error *ResponseError `codec:",omitempty"` +} + +// CurrentSizeResponse is a response for a CurrentSize request +type CurrentSizeResponse struct { + Position uint64 `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` +} + +// ListResponse is a response for a List request +type ListResponse struct { + Keys []string `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` +} + +// MoveResponse is a response for a Move request +type MoveResponse struct { + Error *ResponseError `codec:",omitempty"` +} + +// DeleteResponse is a response for a Delete request +type DeleteResponse struct { + Error *ResponseError `codec:",omitempty"` +} diff --git a/docs/storage/driver/ipc/server.go b/docs/storage/driver/ipc/server.go new file mode 100644 index 00000000..4c6f1d4d --- /dev/null +++ b/docs/storage/driver/ipc/server.go @@ -0,0 +1,178 @@ +// +build ignore + +package ipc + +import ( + "bytes" + "io" + "io/ioutil" + "net" + "os" + "reflect" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libchan" + "github.com/docker/libchan/spdy" +) + +// StorageDriverServer runs a new IPC server handling requests for the given +// storagedriver.StorageDriver +// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in +// client.go +// +// To create a new out-of-process driver, create a main package which calls StorageDriverServer with +// a storagedriver.StorageDriver +func StorageDriverServer(driver storagedriver.StorageDriver) error { + childSocket := os.NewFile(3, "childSocket") + defer childSocket.Close() + conn, err := net.FileConn(childSocket) + if err != nil { + panic(err) + } + defer conn.Close() + if transport, err := spdy.NewServerTransport(conn); err != nil { + panic(err) + } else { + for { + receiver, err := transport.WaitReceiveChannel() + if err == io.EOF { + return nil + } else if err != nil { + panic(err) + } + go receive(driver, receiver) + } + } +} + +// receive receives new storagedriver.StorageDriver method requests and creates a new goroutine to +// handle each request +// Requests are expected to be of type ipc.Request as the parameters are unknown until the request +// type is deserialized +func receive(driver 
storagedriver.StorageDriver, receiver libchan.Receiver) {
+	for {
+		var request Request
+		err := receiver.Receive(&request)
+		if err == io.EOF {
+			return
+		} else if err != nil {
+			panic(err)
+		}
+		go handleRequest(driver, request)
+	}
+}
+
+// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go
+// Responds to requests using the Request.ResponseChannel
+func handleRequest(driver storagedriver.StorageDriver, request Request) {
+	switch request.Type {
+	case "Version":
+		err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion})
+		if err != nil {
+			panic(err)
+		}
+	case "GetContent":
+		path, _ := request.Parameters["Path"].(string)
+		content, err := driver.GetContent(path)
+		var response ReadStreamResponse
+		if err != nil {
+			response = ReadStreamResponse{Error: WrapError(err)}
+		} else {
+			response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))}
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "PutContent":
+		path, _ := request.Parameters["Path"].(string)
+		reader, _ := request.Parameters["Reader"].(io.ReadCloser)
+		contents, err := ioutil.ReadAll(reader)
+		defer reader.Close()
+		if err == nil {
+			err = driver.PutContent(path, contents)
+		}
+		response := WriteStreamResponse{
+			Error: WrapError(err),
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "ReadStream":
+		path, _ := request.Parameters["Path"].(string)
+		// Depending on serialization method, Offset may be converted to any int/uint type
+		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
+		reader, err := driver.ReadStream(path, offset)
+		var response ReadStreamResponse
+		if err != nil {
+			response = ReadStreamResponse{Error: WrapError(err)}
+		} else {
+			response = ReadStreamResponse{Reader: reader}
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "WriteStream":
+		path, _ := request.Parameters["Path"].(string)
+		// Depending on serialization method, Offset may be converted to any int/uint type
+		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
+		// Depending on serialization method, Size may be converted to any int/uint type
+		size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int()
+		reader, _ := request.Parameters["Reader"].(io.ReadCloser)
+		err := driver.WriteStream(path, offset, size, reader)
+		response := WriteStreamResponse{
+			Error: WrapError(err),
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "CurrentSize":
+		path, _ := request.Parameters["Path"].(string)
+		position, err := driver.CurrentSize(path)
+		response := CurrentSizeResponse{
+			Position: position,
+			Error:    WrapError(err),
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "List":
+		path, _ := request.Parameters["Path"].(string)
+		keys, err := driver.List(path)
+		response := ListResponse{
+			Keys:  keys,
+			Error: WrapError(err),
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "Move":
+		sourcePath, _ := request.Parameters["SourcePath"].(string)
+		destPath, _ := request.Parameters["DestPath"].(string)
+		err := driver.Move(sourcePath, destPath)
+		response := MoveResponse{
+			Error: WrapError(err),
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	case "Delete":
+		path, _ := request.Parameters["Path"].(string)
+		err := driver.Delete(path)
+		response := DeleteResponse{
+			Error: WrapError(err),
+		}
+		err = request.ResponseChannel.Send(&response)
+		if err != nil {
+			panic(err)
+		}
+	default:
+		panic(request)
+	}
+}
diff --git a/docs/storage/driver/s3/README.md b/docs/storage/driver/s3/README.md
new file mode 100644
index 00000000..fb0dd014
--- /dev/null
+++ b/docs/storage/driver/s3/README.md
@@ -0,0 +1,26 @@
+Docker-Registry S3 Storage Driver
+=========================================
+
+An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.
+
+## Parameters
+
+`accesskey`: Your aws access key.
+
+`secretkey`: Your aws secret key.
+
+**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials.
+
+`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
+
+`bucket`: The name of your s3 bucket where you wish to store objects (needs to already be created prior to driver initialization).
+
+`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
+
+`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
+
+`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false).
+
+`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to s3. The default is 10 MB. Keep in mind that the minimum part size for s3 is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to s3.
+
+`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).
diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go
new file mode 100644
index 00000000..eb9f08f4
--- /dev/null
+++ b/docs/storage/driver/s3/s3.go
@@ -0,0 +1,712 @@
+// Package s3 provides a storagedriver.StorageDriver implementation to
+// store blobs in Amazon S3 cloud storage.
+//
+// This package leverages the AdRoll/goamz client library for interfacing with
+// s3.
+//
+// Because s3 is a key/value store, the Stat call does not support last
+// modification time for directories (directories are an abstraction for
+// key/value stores).
+//
+// Keep in mind that s3 guarantees only eventual consistency, so do not assume
+// that a successful write will mean immediate access to the data written (although
+// in most regions a new object put has guaranteed read after write). The only true
+// guarantee is that once you call Stat and receive a certain file size, that much of
+// the file is already accessible.
+package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" + + "github.com/AdRoll/goamz/aws" + "github.com/AdRoll/goamz/s3" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region aws.Region + Encrypt bool + Secure bool + V4Auth bool + ChunkSize int64 + RootDirectory string +} + +func init() { + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskey"] + if !ok { + accessKey = "" + } + secretKey, ok := parameters["secretkey"] + if !ok { + secretKey = "" + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := aws.GetRegion(fmt.Sprint(regionName)) + if region.Name == "" { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + v4AuthBool := false + v4Auth, ok := parameters["v4auth"] + if ok { + v4AuthBool, ok = v4Auth.(bool) + if !ok { + return nil, fmt.Errorf("The v4auth parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + chunkSize, ok = chunkSizeParam.(int64) + if !ok || chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize parameter should be a number that 
is larger than 5*1024*1024") + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + encryptBool, + secureBool, + v4AuthBool, + chunkSize, + fmt.Sprint(rootDirectory), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) + if err != nil { + return nil, err + } + + if !params.Secure { + params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) + } + + s3obj := s3.New(auth, params.Region) + bucket := s3obj.Bucket(params.Bucket) + + if params.V4Auth { + s3obj.Signature = aws.V4Signature + } else { + if params.Region.Name == "eu-central-1" { + return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") + } + } + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. + if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(path string) ([]byte, error) { + content, err := d.Bucket.Get(d.s3Path(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) + if err != nil { + if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. 
Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (totalRead int64, err error) { + partNumber := 1 + bytesRead := 0 + var putErrChan chan error + parts := []s3.Part{} + var part s3.Part + + multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return 0, err + } + + buf := make([]byte, d.ChunkSize) + zeroBuf := make([]byte, d.ChunkSize) + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. + defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + // parts and partNumber are safe, because this function is the only one modifying them and we + // force it to be executed serially. 
+ if bytesRead > 0 { + part, putErr := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) + if putErr != nil { + putErrChan <- putErr + } + + parts = append(parts, part) + partNumber++ + } + putErrChan <- nil + }(bytesRead, from, buf) + + buf = make([]byte, d.ChunkSize) + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.s3Path(path), nil) + if err != nil { + if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Name+"/"+d.s3Path(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(zeroBuf).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(zeroBuf)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + s3.CopyOptions{}, + d.Bucket.Name+"/"+d.s3Path(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return 
totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(path string) ([]string, error) { + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) + if err != nil { + return nil, err + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.s3Path(""), "", 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), "", 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), + s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+func (d *driver) Delete(path string) error {
+	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
+	if err != nil || len(listResponse.Contents) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	s3Objects := make([]s3.Object, listMax)
+
+	for len(listResponse.Contents) > 0 {
+		for index, key := range listResponse.Contents {
+			s3Objects[index].Key = key.Key
+		}
+
+		err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
+		if err != nil {
+			return err
+		}
+
+		listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
+func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) {
+	methodString := "GET"
+	method, ok := options["method"]
+	if ok {
+		methodString, ok = method.(string)
+		if !ok || (methodString != "GET" && methodString != "HEAD") {
+			return "", storagedriver.ErrUnsupportedMethod
+		}
+	}
+
+	expiresTime := time.Now().Add(20 * time.Minute)
+	expires, ok := options["expiry"]
+	if ok {
+		et, ok := expires.(time.Time)
+		if ok {
+			expiresTime = et
+		}
+	}
+
+	return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil
+}
+
+func (d *driver) s3Path(path string) string {
+	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
+}
+
+func parseError(path string, err error) error {
+	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	return err
+}
+
+func hasCode(err error, code string) bool {
+	s3err, ok := err.(*aws.Error)
+	return ok && s3err.Code == code
+}
+
+func (d *driver) getOptions() s3.Options {
+	return s3.Options{SSE: d.Encrypt}
+}
+
+func getPermissions() s3.ACL {
+	return s3.Private
+}
+
+func (d *driver) getContentType() string {
+	return "application/octet-stream"
+}
diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go
new file mode 100644
index 00000000..fb2003e1
--- /dev/null
+++ b/docs/storage/driver/s3/s3_test.go
@@ -0,0 +1,97 @@
+package s3
+
+import (
+	"io/ioutil"
+	"os"
+	"strconv"
+	"testing"
+
+	"github.com/AdRoll/goamz/aws"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/testsuites"
+
+	"gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner.
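A short sketch of requesting a pre-signed URL through the options map URLFor accepts above; d stands for an initialized s3 driver and the blob path is a placeholder:

```go
opts := map[string]interface{}{
	"method": "GET",                           // only "GET" and "HEAD" are accepted
	"expiry": time.Now().Add(5 * time.Minute), // overrides the 20-minute default
}
signedURL, err := d.URLFor("/path/to/blob", opts)
if err != nil {
	// e.g. storagedriver.ErrUnsupportedMethod for a disallowed method
	return err
}
fmt.Println(signedURL)
```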
+func Test(t *testing.T) { check.TestingT(t) } + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + secure := os.Getenv("S3_SECURE") + v4auth := os.Getenv("S3_USE_V4_AUTH") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor := func(region aws.Region) (storagedriver.StorageDriver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + v4AuthBool := true + if v4auth != "" { + v4AuthBool, err = strconv.ParseBool(v4auth) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + region, + encryptBool, + secureBool, + v4AuthBool, + minChunkSize, + root, + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + // for _, region := range aws.Regions { + // if region == aws.USGovWest { + // continue + // } + + testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(aws.GetRegion(region)) + }, skipCheck) + // testsuites.RegisterIPCSuite(driverName, map[string]string{ + // "accesskey": accessKey, + // "secretkey": secretKey, + // "region": region.Name, + // "bucket": bucket, + // "encrypt": encrypt, + // }, skipCheck) + // } +} diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go new file mode 100644 index 00000000..dd8fb4a0 --- /dev/null +++ b/docs/storage/driver/storagedriver.go @@ -0,0 +1,118 @@ +package driver + +import ( + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" +) + +// Version is a string representing the storage driver version, of the form +// Major.Minor. +// The registry must accept storage drivers with equal major version and greater +// minor version, but may not be compatible with older storage driver versions. +type Version string + +// Major returns the major (primary) component of a version. +func (version Version) Major() uint { + majorPart := strings.Split(string(version), ".")[0] + major, _ := strconv.ParseUint(majorPart, 10, 0) + return uint(major) +} + +// Minor returns the minor (secondary) component of a version. +func (version Version) Minor() uint { + minorPart := strings.Split(string(version), ".")[1] + minor, _ := strconv.ParseUint(minorPart, 10, 0) + return uint(minor) +} + +// CurrentVersion is the current storage driver Version. +const CurrentVersion Version = "0.1" + +// StorageDriver defines methods that a Storage Driver must implement for a +// filesystem-like key/value object storage. +type StorageDriver interface { + // GetContent retrieves the content stored at "path" as a []byte. + // This should primarily be used for small objects. + GetContent(path string) ([]byte, error) + + // PutContent stores the []byte content at a location designated by "path". + // This should primarily be used for small objects. 
+	PutContent(path string, content []byte) error
+
+	// ReadStream retrieves an io.ReadCloser for the content stored at "path"
+	// with a given byte offset.
+	// May be used to resume reading a stream by providing a nonzero offset.
+	ReadStream(path string, offset int64) (io.ReadCloser, error)
+
+	// WriteStream stores the contents of the provided io.ReadCloser at a
+	// location designated by the given path.
+	// May be used to resume writing a stream by providing a nonzero offset.
+	// The offset must be no larger than the CurrentSize for this path.
+	WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error)
+
+	// Stat retrieves the FileInfo for the given path, including the current
+	// size in bytes and the creation time.
+	Stat(path string) (FileInfo, error)
+
+	// List returns a list of the objects that are direct descendants of the
+	// given path.
+	List(path string) ([]string, error)
+
+	// Move moves an object stored at sourcePath to destPath, removing the
+	// original object.
+	// Note: This may be no more efficient than a copy followed by a delete for
+	// many implementations.
+	Move(sourcePath string, destPath string) error
+
+	// Delete recursively deletes all objects stored at "path" and its subpaths.
+	Delete(path string) error
+
+	// URLFor returns a URL which may be used to retrieve the content stored at
+	// the given path, possibly using the given options.
+	// May return an ErrUnsupportedMethod in certain StorageDriver
+	// implementations.
+	URLFor(path string, options map[string]interface{}) (string, error)
+}
+
+// PathRegexp is the regular expression which each file path must match. A
+// file path is absolute, beginning with a slash and containing a positive
+// number of path components separated by slashes, where each component is
+// restricted to lowercase alphanumeric characters or a period, underscore, or
+// hyphen.
+var PathRegexp = regexp.MustCompile(`^(/[a-z0-9._-]+)+$`)
+
+// ErrUnsupportedMethod may be returned when a StorageDriver implementation
+// does not support an optional method.
+var ErrUnsupportedMethod = errors.New("Unsupported method")
+
+// PathNotFoundError is returned when operating on a nonexistent path.
+type PathNotFoundError struct {
+	Path string
+}
+
+func (err PathNotFoundError) Error() string {
+	return fmt.Sprintf("Path not found: %s", err.Path)
+}
+
+// InvalidPathError is returned when the provided path is malformed.
+type InvalidPathError struct {
+	Path string
+}
+
+func (err InvalidPathError) Error() string {
+	return fmt.Sprintf("Invalid path: %s", err.Path)
+}
+
+// InvalidOffsetError is returned when attempting to read or write from an
+// invalid offset.
+type InvalidOffsetError struct {
+	Path   string
+	Offset int64
+}
+
+func (err InvalidOffsetError) Error() string {
+	return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path)
+}
diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
new file mode 100644
index 00000000..cfa3a48a
--- /dev/null
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -0,0 +1,1183 @@
+package testsuites
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"os"
+	"path"
+	"sort"
+	"sync"
+	"testing"
+	"time"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+
+	"gopkg.in/check.v1"
+)
+
+// Test hooks up gocheck into the "go test" runner.
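The path grammar is easy to sanity-check against PathRegexp directly; the samples below echo the valid and invalid lists exercised by the test suite that follows:

```go
package main

import (
	"fmt"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

func main() {
	for _, p := range []string{
		"/docker/docker-registry", // valid
		"/a-.b",                   // valid
		"/Docker/docker-registry", // invalid: uppercase component
		"//bcd",                   // invalid: empty component
		"abc",                     // invalid: not absolute
	} {
		fmt.Printf("%-26q %v\n", p, storagedriver.PathRegexp.MatchString(p))
	}
}
```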
+func Test(t *testing.T) { check.TestingT(t) }
+
+// RegisterInProcessSuite registers an in-process storage driver test suite with
+// the go test runner.
+func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) {
+	check.Suite(&DriverSuite{
+		Constructor: driverConstructor,
+		SkipCheck:   skipCheck,
+	})
+}
+
+// RegisterIPCSuite registers a storage driver test suite which runs the named
+// driver as a child process with the given parameters.
+func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) {
+	panic("ipc testing is disabled for now")
+
+	// NOTE(stevvooe): IPC testing is disabled for now. Uncomment the code
+	// block below and remove the panic when we phase it back in.
+
+	// suite := &DriverSuite{
+	// 	Constructor: func() (storagedriver.StorageDriver, error) {
+	// 		d, err := ipc.NewDriverClient(driverName, ipcParams)
+	// 		if err != nil {
+	// 			return nil, err
+	// 		}
+	// 		err = d.Start()
+	// 		if err != nil {
+	// 			return nil, err
+	// 		}
+	// 		return d, nil
+	// 	},
+	// 	SkipCheck: skipCheck,
+	// }
+	// suite.Teardown = func() error {
+	// 	if suite.StorageDriver == nil {
+	// 		return nil
+	// 	}
+
+	// 	driverClient := suite.StorageDriver.(*ipc.StorageDriverClient)
+	// 	return driverClient.Stop()
+	// }
+	// check.Suite(suite)
+}
+
+// SkipCheck is a function used to determine if a test suite should be skipped.
+// If a SkipCheck returns a non-empty skip reason, the suite is skipped with
+// the given reason.
+type SkipCheck func() (reason string)
+
+// NeverSkip is a default SkipCheck which never skips the suite.
+var NeverSkip SkipCheck = func() string { return "" }
+
+// DriverConstructor is a function which returns a new
+// storagedriver.StorageDriver.
+type DriverConstructor func() (storagedriver.StorageDriver, error)
+
+// DriverTeardown is a function which cleans up a suite's
+// storagedriver.StorageDriver.
+type DriverTeardown func() error
+
+// DriverSuite is a gocheck test suite designed to test a
+// storagedriver.StorageDriver.
+// The intended way to create a DriverSuite is with RegisterInProcessSuite or
+// RegisterIPCSuite.
+type DriverSuite struct {
+	Constructor DriverConstructor
+	Teardown    DriverTeardown
+	SkipCheck
+	storagedriver.StorageDriver
+}
+
+// SetUpSuite sets up the gocheck test suite.
+func (suite *DriverSuite) SetUpSuite(c *check.C) {
+	if reason := suite.SkipCheck(); reason != "" {
+		c.Skip(reason)
+	}
+	d, err := suite.Constructor()
+	c.Assert(err, check.IsNil)
+	suite.StorageDriver = d
+}
+
+// TearDownSuite tears down the gocheck test suite.
+func (suite *DriverSuite) TearDownSuite(c *check.C) {
+	if suite.Teardown != nil {
+		err := suite.Teardown()
+		c.Assert(err, check.IsNil)
+	}
+}
+
+// TearDownTest tears down the gocheck test.
+// This causes the suite to abort if any files are left around in the storage
+// driver.
+func (suite *DriverSuite) TearDownTest(c *check.C) {
+	files, _ := suite.StorageDriver.List("/")
+	if len(files) > 0 {
+		c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files)
+	}
+}
+
+// TestValidPaths checks that various valid file paths are accepted by the
+// storage driver.
+func (suite *DriverSuite) TestValidPaths(c *check.C) {
+	contents := randomContents(64)
+	validFiles := []string{
+		"/a",
+		"/2",
+		"/aa",
+		"/a.a",
+		"/0-9/abcdefg",
+		"/abcdefg/z.75",
+		"/abc/1.2.3.4.5-6_zyx/123.z/4",
+		"/docker/docker-registry",
+		"/123.abc",
+		"/abc./abc",
+		"/.abc",
+		"/a--b",
+		"/a-.b",
+		"/_.abc"}
+
+	for _, filename := range validFiles {
+		err := suite.StorageDriver.PutContent(filename, contents)
+		defer suite.StorageDriver.Delete(firstPart(filename))
+		c.Assert(err, check.IsNil)
+
+		received, err := suite.StorageDriver.GetContent(filename)
+		c.Assert(err, check.IsNil)
+		c.Assert(received, check.DeepEquals, contents)
+	}
+}
+
+// TestInvalidPaths checks that various invalid file paths are rejected by the
+// storage driver.
+func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
+	contents := randomContents(64)
+	invalidFiles := []string{
+		"",
+		"/",
+		"abc",
+		"123.abc",
+		"//bcd",
+		"/abc_123/",
+		"/Docker/docker-registry"}
+
+	for _, filename := range invalidFiles {
+		err := suite.StorageDriver.PutContent(filename, contents)
+		defer suite.StorageDriver.Delete(firstPart(filename))
+		c.Assert(err, check.NotNil)
+		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
+
+		_, err = suite.StorageDriver.GetContent(filename)
+		c.Assert(err, check.NotNil)
+		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
+	}
+}
+
+// TestWriteRead1 tests a simple write-read workflow.
+func (suite *DriverSuite) TestWriteRead1(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte("a")
+	suite.writeReadCompare(c, filename, contents)
+}
+
+// TestWriteRead2 tests a simple write-read workflow with unicode data.
+func (suite *DriverSuite) TestWriteRead2(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte("\xc3\x9f")
+	suite.writeReadCompare(c, filename, contents)
+}
+
+// TestWriteRead3 tests a simple write-read workflow with a small amount of
+// data.
+func (suite *DriverSuite) TestWriteRead3(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(32)
+	suite.writeReadCompare(c, filename, contents)
+}
+
+// TestWriteRead4 tests a simple write-read workflow with 1MB of data.
+func (suite *DriverSuite) TestWriteRead4(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(1024 * 1024)
+	suite.writeReadCompare(c, filename, contents)
+}
+
+// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage
+// driver safely.
+func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte{0x80, 0x80, 0x80, 0x80}
+	suite.writeReadCompare(c, filename, contents)
+}
+
+// TestTruncate tests that writing shorter contents than an original file
+// removes the excess contents.
+func (suite *DriverSuite) TestTruncate(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(1024 * 1024)
+	suite.writeReadCompare(c, filename, contents)
+
+	contents = randomContents(1024)
+	suite.writeReadCompare(c, filename, contents)
+}
+
+// TestReadNonexistent tests reading content from a nonexistent path.
+func (suite *DriverSuite) TestReadNonexistent(c *check.C) {
+	filename := randomPath(32)
+	_, err := suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+}
+
+// TestWriteReadStreams1 tests a simple write-read streaming workflow.
+func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte("a")
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadStreams2 tests a simple write-read streaming workflow with
+// unicode data.
+func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte("\xc3\x9f")
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadStreams3 tests a simple write-read streaming workflow with a
+// small amount of data.
+func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(32)
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB
+// of data.
+func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(1024 * 1024)
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the
+// storage driver safely.
+func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) {
+	filename := randomPath(32)
+	contents := []byte{0x80, 0x80, 0x80, 0x80}
+	suite.writeReadCompareStreams(c, filename, contents)
+}
+
+// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage
+// driver safely.
+func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
+	if testing.Short() {
+		c.Skip("Skipping test in short mode")
+	}
+
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	checksum := sha1.New()
+	var fileSize int64 = 5 * 1024 * 1024 * 1024
+
+	contents := newRandReader(fileSize)
+	written, err := suite.StorageDriver.WriteStream(filename, 0, io.TeeReader(contents, checksum))
+	c.Assert(err, check.IsNil)
+	c.Assert(written, check.Equals, fileSize)
+
+	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	c.Assert(err, check.IsNil)
+
+	readChecksum := sha1.New()
+	_, err = io.Copy(readChecksum, reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil))
+}
+
+// TestReadStreamWithOffset tests that the appropriate data is streamed when
+// reading with a given offset.
+func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	chunkSize := int64(32)
+
+	contentsChunk1 := randomContents(chunkSize)
+	contentsChunk2 := randomContents(chunkSize)
+	contentsChunk3 := randomContents(chunkSize)
+
+	err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+	c.Assert(err, check.IsNil)
+
+	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err := ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+
+	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err = ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))
+
+	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*2)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	readContents, err = ioutil.ReadAll(reader)
+	c.Assert(err, check.IsNil)
+	c.Assert(readContents, check.DeepEquals, contentsChunk3)
+
+	// Ensure we get an invalid offset error for negative offsets.
+	reader, err = suite.StorageDriver.ReadStream(filename, -1)
+	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
+	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
+	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
+	c.Assert(reader, check.IsNil)
+
+	// Read past the end of the content and make sure we get a reader that
+	// returns 0 bytes and io.EOF
+	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	buf := make([]byte, chunkSize)
+	n, err := reader.Read(buf)
+	c.Assert(err, check.Equals, io.EOF)
+	c.Assert(n, check.Equals, 0)
+
+	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
+	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3-1)
+	c.Assert(err, check.IsNil)
+	defer reader.Close()
+
+	n, err = reader.Read(buf)
+	c.Assert(n, check.Equals, 1)
+
+	// We don't care whether the io.EOF comes on this read or the first
+	// zero read, but the only error acceptable here is io.EOF.
+	if err != nil {
+		c.Assert(err, check.Equals, io.EOF)
+	}
+
+	// Any more reads should result in zero bytes and io.EOF
+	n, err = reader.Read(buf)
+	c.Assert(n, check.Equals, 0)
+	c.Assert(err, check.Equals, io.EOF)
+}
+
+// TestContinueStreamAppendLarge tests that a stream write can be appended to without
+// corrupting the data with a large chunk size.
+func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) {
+	suite.testContinueStreamAppend(c, int64(10*1024*1024))
+}
+
+// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only
+// with a tiny chunk size in order to test corner cases for some cloud storage drivers.
+func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) {
+	suite.testContinueStreamAppend(c, int64(32))
+}
+
+func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) {
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	contentsChunk1 := randomContents(chunkSize)
+	contentsChunk2 := randomContents(chunkSize)
+	contentsChunk3 := randomContents(chunkSize)
+	contentsChunk4 := randomContents(chunkSize)
+	zeroChunk := make([]byte, int64(chunkSize))
+
+	fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)
+
+	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contentsChunk1))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(contentsChunk1)))
+
+	fi, err := suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1)))
+
+	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
+
+	fi, err = suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
+
+	// Test re-writing the last chunk
+	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
+
+	fi, err = suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
+
+	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():]))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():])))
+
+	received, err := suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(received, check.DeepEquals, fullContents)
+
+	// Writing past size of file extends file (no offset error). We would like
+	// to write chunk 4 one chunk length past chunk 3. It should be successful
+	// and the resulting file will be 5 chunks long, with a chunk of all
+	// zeros.
+
+	fullContents = append(fullContents, zeroChunk...)
+	fullContents = append(fullContents, contentsChunk4...)
+
+	nn, err = suite.StorageDriver.WriteStream(filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, chunkSize)
+
+	fi, err = suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, int64(len(fullContents)))
+
+	received, err = suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(len(received), check.Equals, len(fullContents))
+	c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk)
+	c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4)
+	c.Assert(received, check.DeepEquals, fullContents)
+
+	// Ensure that negative offsets return correct error.
+ nn, err = suite.StorageDriver.WriteStream(filename, -1, bytes.NewReader(zeroChunk)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) + c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) + c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) +} + +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails. +func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { + filename := randomPath(32) + + _, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + + _, err = suite.StorageDriver.ReadStream(filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestList checks the returned list of keys after populating a directory tree. +func (suite *DriverSuite) TestList(c *check.C) { + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.StorageDriver.Delete(rootDirectory) + + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(childFile, randomContents(32)) + c.Assert(err, check.IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List("/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(rootDirectory) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(parentDirectory) + c.Assert(err, check.IsNil) + + sort.Strings(keys) + c.Assert(keys, check.DeepEquals, childFiles) + + // A few checks to add here (check out #819 for more discussion on this): + // 1. Ensure that all paths are absolute. + // 2. Ensure that listings only include direct children. + // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). +} + +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination. +func (suite *DriverSuite) TestMove(c *check.C) { + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) + + defer suite.StorageDriver.Delete(firstPart(sourcePath)) + defer suite.StorageDriver.Delete(firstPart(destPath)) + + err := suite.StorageDriver.PutContent(sourcePath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. 
+func (suite *DriverSuite) TestMoveOverwrite(c *check.C) {
+	sourcePath := randomPath(32)
+	destPath := randomPath(32)
+	sourceContents := randomContents(32)
+	destContents := randomContents(64)
+
+	defer suite.StorageDriver.Delete(firstPart(sourcePath))
+	defer suite.StorageDriver.Delete(firstPart(destPath))
+
+	err := suite.StorageDriver.PutContent(sourcePath, sourceContents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(destPath, destContents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Move(sourcePath, destPath)
+	c.Assert(err, check.IsNil)
+
+	received, err := suite.StorageDriver.GetContent(destPath)
+	c.Assert(err, check.IsNil)
+	c.Assert(received, check.DeepEquals, sourceContents)
+
+	_, err = suite.StorageDriver.GetContent(sourcePath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+}
+
+// TestMoveNonexistent checks that moving a nonexistent key fails and does not
+// delete the data at the destination path.
+func (suite *DriverSuite) TestMoveNonexistent(c *check.C) {
+	contents := randomContents(32)
+	sourcePath := randomPath(32)
+	destPath := randomPath(32)
+
+	defer suite.StorageDriver.Delete(firstPart(destPath))
+
+	err := suite.StorageDriver.PutContent(destPath, contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Move(sourcePath, destPath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	received, err := suite.StorageDriver.GetContent(destPath)
+	c.Assert(err, check.IsNil)
+	c.Assert(received, check.DeepEquals, contents)
+}
+
+// TestDelete checks that the delete operation removes data from the storage
+// driver.
+func (suite *DriverSuite) TestDelete(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	err := suite.StorageDriver.PutContent(filename, contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(filename)
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+}
+
+// TestURLFor checks that the URLFor method functions properly, but only if it
+// is implemented.
+func (suite *DriverSuite) TestURLFor(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	err := suite.StorageDriver.PutContent(filename, contents)
+	c.Assert(err, check.IsNil)
+
+	url, err := suite.StorageDriver.URLFor(filename, nil)
+	if err == storagedriver.ErrUnsupportedMethod {
+		return
+	}
+	c.Assert(err, check.IsNil)
+
+	response, err := http.Get(url)
+	c.Assert(err, check.IsNil)
+	defer response.Body.Close()
+
+	read, err := ioutil.ReadAll(response.Body)
+	c.Assert(err, check.IsNil)
+	c.Assert(read, check.DeepEquals, contents)
+
+	url, err = suite.StorageDriver.URLFor(filename, map[string]interface{}{"method": "HEAD"})
+	if err == storagedriver.ErrUnsupportedMethod {
+		return
+	}
+	c.Assert(err, check.IsNil)
+
+	response, err = http.Head(url)
+	c.Assert(err, check.IsNil)
+	c.Assert(response.StatusCode, check.Equals, 200)
+	c.Assert(response.ContentLength, check.Equals, int64(32))
+}
+
+// TestDeleteNonexistent checks that removing a nonexistent key fails.
+func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) {
+	filename := randomPath(32)
+	err := suite.StorageDriver.Delete(filename)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+}
+
+// TestDeleteFolder checks that deleting a folder removes all child elements.
+func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
+	dirname := randomPath(32)
+	filename1 := randomPath(32)
+	filename2 := randomPath(32)
+	filename3 := randomPath(32)
+	contents := randomContents(32)
+
+	defer suite.StorageDriver.Delete(firstPart(dirname))
+
+	err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(path.Join(dirname, filename3), contents)
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(path.Join(dirname, filename1))
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
+	c.Assert(err, check.IsNil)
+
+	err = suite.StorageDriver.Delete(dirname)
+	c.Assert(err, check.IsNil)
+
+	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+}
+
+// TestStatCall verifies the implementation of the storagedriver's Stat call.
+func (suite *DriverSuite) TestStatCall(c *check.C) {
+	content := randomContents(4096)
+	dirPath := randomPath(32)
+	fileName := randomFilename(32)
+	filePath := path.Join(dirPath, fileName)
+
+	defer suite.StorageDriver.Delete(firstPart(dirPath))
+
+	// Call on non-existent file/dir, check error.
+	fi, err := suite.StorageDriver.Stat(dirPath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(fi, check.IsNil)
+
+	fi, err = suite.StorageDriver.Stat(filePath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(fi, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(filePath, content)
+	c.Assert(err, check.IsNil)
+
+	// Call on regular file, check results
+	fi, err = suite.StorageDriver.Stat(filePath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, filePath)
+	c.Assert(fi.Size(), check.Equals, int64(len(content)))
+	c.Assert(fi.IsDir(), check.Equals, false)
+	createdTime := fi.ModTime()
+
+	// Sleep and modify the file
+	time.Sleep(time.Second * 10)
+	content = randomContents(4096)
+	err = suite.StorageDriver.PutContent(filePath, content)
+	c.Assert(err, check.IsNil)
+	fi, err = suite.StorageDriver.Stat(filePath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
+
+	// Check that the modification time is after the creation time.
+	// In case of cloud storage services, storage frontend nodes might have
+	// time drift between them; the sleep before the update should compensate
+	// for that.
+	modTime := fi.ModTime()
+	if !modTime.After(createdTime) {
+		c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime)
+	}
+
+	// Call on directory (do not check ModTime as dirs don't need to support it)
+	fi, err = suite.StorageDriver.Stat(dirPath)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, dirPath)
+	c.Assert(fi.Size(), check.Equals, int64(0))
+	c.Assert(fi.IsDir(), check.Equals, true)
+}
+
+// TestPutContentMultipleTimes checks that the storage driver can overwrite
+// content on subsequent puts. It validates that PutContent does not have to
+// work with an offset like WriteStream does and overwrites the file entirely
+// rather than writing only to the range [0, len(data)) of the file.
+func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) {
+	filename := randomPath(32)
+	contents := randomContents(4096)
+
+	defer suite.StorageDriver.Delete(firstPart(filename))
+	err := suite.StorageDriver.PutContent(filename, contents)
+	c.Assert(err, check.IsNil)
+
+	contents = randomContents(2048) // upload a different, smaller file
+	err = suite.StorageDriver.PutContent(filename, contents)
+	c.Assert(err, check.IsNil)
+
+	readContents, err := suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(readContents, check.DeepEquals, contents)
+}
+
+// TestConcurrentStreamReads checks that multiple clients can safely read from
+// the same file simultaneously with various offsets.
+func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) {
+	var filesize int64 = 128 * 1024 * 1024
+
+	if testing.Short() {
+		filesize = 10 * 1024 * 1024
+		c.Log("Reducing file size to 10MB for short mode")
+	}
+
+	filename := randomPath(32)
+	contents := randomContents(filesize)
+
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	err := suite.StorageDriver.PutContent(filename, contents)
+	c.Assert(err, check.IsNil)
+
+	var wg sync.WaitGroup
+
+	readContents := func() {
+		defer wg.Done()
+		offset := rand.Int63n(int64(len(contents)))
+		reader, err := suite.StorageDriver.ReadStream(filename, offset)
+		c.Assert(err, check.IsNil)
+
+		readContents, err := ioutil.ReadAll(reader)
+		c.Assert(err, check.IsNil)
+		c.Assert(readContents, check.DeepEquals, contents[offset:])
+	}
+
+	wg.Add(10)
+	for i := 0; i < 10; i++ {
+		go readContents()
+	}
+	wg.Wait()
+}
+
+// TestConcurrentFileStreams checks that multiple *os.File objects can be passed
+// in to WriteStream concurrently without hanging.
+func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) {
+	// if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC {
+	// 	c.Skip("Need to fix out-of-process concurrency")
+	// }
+
+	numStreams := 32
+
+	if testing.Short() {
+		numStreams = 8
+		c.Log("Reducing number of streams to 8 for short mode")
+	}
+
+	var wg sync.WaitGroup
+
+	testStream := func(size int64) {
+		defer wg.Done()
+		suite.testFileStreams(c, size)
+	}
+
+	wg.Add(numStreams)
+	for i := numStreams; i > 0; i-- {
+		go testStream(int64(numStreams) * 1024 * 1024)
+	}
+
+	wg.Wait()
+}
+
+// TestEventualConsistency checks that once Stat reports a file at a certain
+// size, the file can be freely read back in full (this is the only consistency
+// guarantee the driver needs to provide).
+func (suite *DriverSuite) TestEventualConsistency(c *check.C) {
+	if testing.Short() {
+		c.Skip("Skipping test in short mode")
+	}
+
+	filename := randomPath(32)
+	defer suite.StorageDriver.Delete(firstPart(filename))
+
+	var offset int64
+	var misswrites int
+	var chunkSize int64 = 32
+
+	for i := 0; i < 1024; i++ {
+		contents := randomContents(chunkSize)
+		read, err := suite.StorageDriver.WriteStream(filename, offset, bytes.NewReader(contents))
+		c.Assert(err, check.IsNil)
+
+		fi, err := suite.StorageDriver.Stat(filename)
+		c.Assert(err, check.IsNil)
+
+		// We are most concerned with being able to read data as soon as Stat declares
+		// it is uploaded. This is the strongest guarantee that some drivers (that guarantee
+		// at best eventual consistency) absolutely need to provide.
+		if fi.Size() == offset+chunkSize {
+			reader, err := suite.StorageDriver.ReadStream(filename, offset)
+			c.Assert(err, check.IsNil)
+
+			readContents, err := ioutil.ReadAll(reader)
+			c.Assert(err, check.IsNil)
+
+			c.Assert(readContents, check.DeepEquals, contents)
+
+			reader.Close()
+			offset += read
+		} else {
+			misswrites++
+		}
+	}
+
+	if misswrites > 0 {
+		c.Logf("There were %d occurrences of a write not being instantly available.", misswrites)
+	}
+
+	c.Assert(misswrites, check.Not(check.Equals), 1024)
+}
+
+// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files
+func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 0)
+}
+
+// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files
+func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024)
+}
+
+// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files
+func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024*1024)
+}
+
+// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files
+func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) {
+	suite.benchmarkPutGetFiles(c, 1024*1024*1024)
+}
+
+func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
+	c.SetBytes(size)
+	parentDir := randomPath(8)
+	defer func() {
+		c.StopTimer()
+		suite.StorageDriver.Delete(firstPart(parentDir))
+	}()
+
+	for i := 0; i < c.N; i++ {
+		filename := path.Join(parentDir, randomPath(32))
+		err := suite.StorageDriver.PutContent(filename, randomContents(size))
+		c.Assert(err, check.IsNil)
+
+		_, err = suite.StorageDriver.GetContent(filename)
+		c.Assert(err, check.IsNil)
+	}
+}
+
+// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files
+func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 0)
+}
+
+// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files
+func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 1024)
+}
+
+// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files
+func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 1024*1024)
+}
+
+// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files
+func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) {
+	suite.benchmarkStreamFiles(c, 1024*1024*1024)
+}
+
+func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) {
+	c.SetBytes(size)
+	parentDir := randomPath(8)
+	defer func() {
+		c.StopTimer()
+		suite.StorageDriver.Delete(firstPart(parentDir))
+	}()
+
+	for i := 0; i < c.N; i++ {
+		filename := path.Join(parentDir, randomPath(32))
+		written, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(randomContents(size)))
+		c.Assert(err, check.IsNil)
+		c.Assert(written, check.Equals, size)
+
+		rc, err := suite.StorageDriver.ReadStream(filename, 0)
+		c.Assert(err, check.IsNil)
+		rc.Close()
+	}
+}
+
+// BenchmarkList5Files benchmarks List for 5 small files
+func (suite *DriverSuite) BenchmarkList5Files(c *check.C) {
+	suite.benchmarkListFiles(c, 5)
+}
+
+// BenchmarkList50Files benchmarks List for 50 small files
+func (suite *DriverSuite) BenchmarkList50Files(c *check.C) {
+	suite.benchmarkListFiles(c, 50)
+}
+
+func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) {
+	parentDir := randomPath(8)
+	defer func() {
+		c.StopTimer()
+
suite.StorageDriver.Delete(firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.StorageDriver.Delete(firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + defer tf.Close() + + filename := randomPath(32) + defer suite.StorageDriver.Delete(firstPart(filename)) + + contents := randomContents(size) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + nn, err := suite.StorageDriver.WriteStream(filename, 0, tf) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, size) + + reader, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { + defer suite.StorageDriver.Delete(firstPart(filename)) + + err := suite.StorageDriver.PutContent(filename, contents) + c.Assert(err, check.IsNil) + + readContents, err := suite.StorageDriver.GetContent(filename) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { + defer suite.StorageDriver.Delete(firstPart(filename)) + + nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contents)) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(contents))) + + reader, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") + +func randomPath(length int64) string { + path := "/" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + remaining := length - int64(len(path)) + if remaining == 1 { + path += randomFilename(1) + } else if remaining > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { + b := 
make([]byte, length) + wasSeparator := true + for i := range b { + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } + } + return string(b) +} + +func randomContents(length int64) []byte { + b := make([]byte, length) + for i := range b { + b[i] = byte(rand.Intn(2 << 8)) + } + return b +} + +type randReader struct { + r int64 + m sync.Mutex +} + +func (rr *randReader) Read(p []byte) (n int, err error) { + rr.m.Lock() + defer rr.m.Unlock() + for i := 0; i < len(p) && rr.r > 0; i++ { + p[i] = byte(rand.Intn(255)) + n++ + rr.r-- + } + if rr.r == 0 { + err = io.EOF + } + return +} + +func newRandReader(n int64) *randReader { + return &randReader{r: n} +} + +func firstPart(filePath string) string { + if filePath == "" { + return "/" + } + for { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + dir, file := path.Split(filePath) + if dir == "" && file == "" { + return "/" + } + if dir == "/" || dir == "" { + return "/" + file + } + if file == "" { + return dir + } + filePath = dir + } +} diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go index 9bc09afe..4cb2b331 100644 --- a/docs/storage/filereader.go +++ b/docs/storage/filereader.go @@ -9,7 +9,7 @@ import ( "os" "time" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" ) // TODO(stevvooe): Set an optimal buffer size here. We'll have to diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go index 53dd6c9a..7c554e8b 100644 --- a/docs/storage/filereader_test.go +++ b/docs/storage/filereader_test.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution/digest" - "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/registry/storage/driver/inmemory" ) func TestSimpleRead(t *testing.T) { diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go index 5037f160..cbf03704 100644 --- a/docs/storage/filewriter.go +++ b/docs/storage/filewriter.go @@ -6,7 +6,7 @@ import ( "io" "os" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" ) // fileWriter implements a remote file writer backed by a storage driver. 
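As context for the testsuites package above: individual driver packages consume it through RegisterInProcessSuite, the way the S3 tests earlier in this patch do. A minimal sketch of such a registration follows; the exampledriver package name and the New constructor are hypothetical stand-ins for a real driver's own, and a real driver would typically read its parameters from the environment:

package exampledriver

import (
	"testing"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/testsuites"
	"gopkg.in/check.v1"
)

// Hook gocheck into the "go test" runner for this package.
func Test(t *testing.T) { check.TestingT(t) }

func init() {
	// New is a hypothetical stand-in for the driver's constructor.
	constructor := func() (storagedriver.StorageDriver, error) {
		return New()
	}

	// NeverSkip runs the suite unconditionally; a real driver would
	// return a non-empty skip reason from a custom SkipCheck when
	// required configuration (e.g. credentials) is missing.
	testsuites.RegisterInProcessSuite(constructor, testsuites.NeverSkip)
}

Returning a non-empty string from the SkipCheck skips the whole suite, which is how the S3 tests bail out when no credentials are provided.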
diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go index 2235462f..1a38a519 100644 --- a/docs/storage/filewriter_test.go +++ b/docs/storage/filewriter_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/registry/storage/driver/inmemory" ) // TestSimpleWrite takes the fileWriter through common write operations diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go index c7d64b79..48982993 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/layer_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storagedriver" - "github.com/docker/distribution/storagedriver/inmemory" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "golang.org/x/net/context" ) diff --git a/docs/storage/layerhandler.go b/docs/storage/layerhandler.go index 2755470e..5bcfeddd 100644 --- a/docs/storage/layerhandler.go +++ b/docs/storage/layerhandler.go @@ -4,7 +4,7 @@ import ( "fmt" "net/http" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" ) // LayerHandler provides middleware for serving the contents of a Layer. diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index b6578792..e4760b4e 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -7,7 +7,7 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" ) type layerStore struct { diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go index 54860913..e6cf21a9 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerupload.go @@ -9,7 +9,7 @@ import ( "github.com/Sirupsen/logrus" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/docker/pkg/tarsum" ) diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 8284ce94..1fd02662 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -8,7 +8,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "golang.org/x/net/context" diff --git a/docs/storage/notifications/listener_test.go b/docs/storage/notifications/listener_test.go index b62e7e7e..b8d6a31e 100644 --- a/docs/storage/notifications/listener_test.go +++ b/docs/storage/notifications/listener_test.go @@ -8,7 +8,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/storagedriver/inmemory" + "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "golang.org/x/net/context" diff --git 
a/docs/storage/registry.go b/docs/storage/registry.go index ed865007..c4ca2f80 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -1,7 +1,7 @@ package storage import ( - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" "golang.org/x/net/context" ) diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index f7b87a25..6ae3e5f8 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -4,7 +4,7 @@ import ( "path" "github.com/docker/distribution/digest" - "github.com/docker/distribution/storagedriver" + storagedriver "github.com/docker/distribution/registry/storage/driver" ) // tagStore provides methods to manage manifest tags in a backend storage driver. From fac0f5412d139055e64760cc6155d3b8aa82e1cd Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 10 Feb 2015 18:18:45 -0800 Subject: [PATCH 024/501] Run goimports/gofmt on previous changes After all of the perl refactoring, some import orderings were left asunder. This commit corrects that. Signed-off-by: Stephen J Day --- docs/auth/silly/access.go | 2 +- docs/auth/token/accesscontroller.go | 2 +- docs/client/client.go | 2 +- docs/handlers/context.go | 2 +- docs/handlers/images.go | 2 +- docs/handlers/layer.go | 2 +- docs/handlers/layerupload.go | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go index 7d3a4d40..20448efd 100644 --- a/docs/auth/silly/access.go +++ b/docs/auth/silly/access.go @@ -12,8 +12,8 @@ import ( "net/http" "strings" - "github.com/docker/distribution/registry/auth" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" "golang.org/x/net/context" ) diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go index 61b275a7..cb23eab6 100644 --- a/docs/auth/token/accesscontroller.go +++ b/docs/auth/token/accesscontroller.go @@ -11,8 +11,8 @@ import ( "os" "strings" - "github.com/docker/distribution/registry/auth" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" "golang.org/x/net/context" ) diff --git a/docs/client/client.go b/docs/client/client.go index c697e01c..8e868c41 100644 --- a/docs/client/client.go +++ b/docs/client/client.go @@ -9,9 +9,9 @@ import ( "regexp" "strconv" - "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/v2" ) // Client implements the client interface to the registry http api diff --git a/docs/handlers/context.go b/docs/handlers/context.go index a49253ee..0d3f44cc 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -4,9 +4,9 @@ import ( "fmt" "net/http" - "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage" "golang.org/x/net/context" ) diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 6a0e9a40..e41f3682 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -5,10 +5,10 @@ import ( "fmt" "net/http" - "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + 
"github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index 8214fbf0..105c2964 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -3,9 +3,9 @@ package handlers import ( "net/http" - "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 83ef6fb6..237644ea 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -7,9 +7,9 @@ import ( "net/url" "os" - "github.com/docker/distribution/registry/api/v2" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) From 8728074d65102e0a90619138a3f0d3507ce2cfba Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Wed, 11 Feb 2015 09:46:23 -0800 Subject: [PATCH 025/501] storagedriver/azure: Add README Signed-off-by: Ahmet Alp Balkan --- docs/storage/driver/azure/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 docs/storage/driver/azure/README.md diff --git a/docs/storage/driver/azure/README.md b/docs/storage/driver/azure/README.md new file mode 100644 index 00000000..f0fd296d --- /dev/null +++ b/docs/storage/driver/azure/README.md @@ -0,0 +1,16 @@ +# Docker Registry Microsoft Azure Blob Storage Driver + + +An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage. + +## Parameters + +The following parameters must be used to authenticate and configure the storage driver (case-sensitive): + +* `accountname`: Name of the Azure Storage Account. +* `accountkey`: Primary or Secondary Key for the Storage Account. +* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. + + +[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/ +[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx \ No newline at end of file From 553d48d618411feaa6ae947a61f7fd9c9153e68e Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 11 Feb 2015 16:49:49 -0800 Subject: [PATCH 026/501] Move layer interface definitions to distribution package After consideration, it has been decided that the interfaces defined in the storage package provide a good base for interacting with various registry instances. Whether interacting with a remote API or a local, on-disk registry, these types have proved flexible. By moving them here, they can become the central components of interacting with distribution components. 
Signed-off-by: Stephen J Day --- docs/handlers/app.go | 4 +- docs/handlers/context.go | 4 +- docs/handlers/images.go | 3 +- docs/handlers/layer.go | 4 +- docs/handlers/layerupload.go | 8 +- docs/storage/cloudfrontlayerhandler.go | 3 +- docs/storage/delegatelayerhandler.go | 5 +- docs/storage/filereader.go | 35 ++++---- docs/storage/layer.go | 90 --------------------- docs/storage/layer_test.go | 7 +- docs/storage/layerhandler.go | 3 +- docs/storage/layerreader.go | 8 +- docs/storage/layerstore.go | 17 ++-- docs/storage/layerupload.go | 11 +-- docs/storage/manifeststore.go | 5 +- docs/storage/notifications/bridge.go | 25 +++--- docs/storage/notifications/listener.go | 38 ++++----- docs/storage/notifications/listener_test.go | 15 ++-- docs/storage/registry.go | 9 ++- docs/storage/services.go | 84 ------------------- 20 files changed, 113 insertions(+), 265 deletions(-) delete mode 100644 docs/storage/layer.go delete mode 100644 docs/storage/services.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 7a36309b..bc0c46aa 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -7,8 +7,10 @@ import ( "os" "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/notifications" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/registry/storage" @@ -32,7 +34,7 @@ type App struct { router *mux.Router // main application router, configured with dispatchers driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. - registry storage.Registry // registry is the primary registry backend for the app instance. + registry distribution.Registry // registry is the primary registry backend for the app instance. accessController auth.AccessController // main access controller for application // events contains notification related configuration. diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 0d3f44cc..ee02a53a 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -4,10 +4,10 @@ import ( "fmt" "net/http" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" "golang.org/x/net/context" ) @@ -21,7 +21,7 @@ type Context struct { // Repository is the repository for the current request. All requests // should be scoped to a single repository. This field may be nil. - Repository storage.Repository + Repository distribution.Repository // Errors is a collection of errors encountered during the request to be // returned to the client API. 
If errors are added to the collection, the diff --git a/docs/handlers/images.go b/docs/handlers/images.go index e41f3682..0e58984b 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" @@ -72,7 +73,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http case storage.ErrManifestVerification: for _, verificationError := range err { switch verificationError := verificationError.(type) { - case storage.ErrUnknownLayer: + case distribution.ErrUnknownLayer: imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer) case storage.ErrManifestUnverified: imh.Errors.Push(v2.ErrorCodeManifestUnverified) diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index 105c2964..69c3df7c 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -3,10 +3,10 @@ package handlers import ( "net/http" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) @@ -54,7 +54,7 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { if err != nil { switch err := err.(type) { - case storage.ErrUnknownLayer: + case distribution.ErrUnknownLayer: w.WriteHeader(http.StatusNotFound) lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer) default: diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 237644ea..fa854449 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -7,10 +7,10 @@ import ( "net/url" "os" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) @@ -63,7 +63,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { upload, err := layers.Resume(luh.UUID) if err != nil { ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) - if err == storage.ErrLayerUploadUnknown { + if err == distribution.ErrLayerUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) @@ -114,7 +114,7 @@ type layerUploadHandler struct { // UUID identifies the upload instance for the current request. 
UUID string - Upload storage.LayerUpload + Upload distribution.LayerUpload State layerUploadState } @@ -196,7 +196,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * layer, err := luh.Upload.Finish(dgst) if err != nil { switch err := err.(type) { - case storage.ErrLayerInvalidDigest: + case distribution.ErrLayerInvalidDigest: w.WriteHeader(http.StatusBadRequest) luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) default: diff --git a/docs/storage/cloudfrontlayerhandler.go b/docs/storage/cloudfrontlayerhandler.go index f887895c..82bc313d 100644 --- a/docs/storage/cloudfrontlayerhandler.go +++ b/docs/storage/cloudfrontlayerhandler.go @@ -10,6 +10,7 @@ import ( "time" "github.com/AdRoll/goamz/cloudfront" + "github.com/docker/distribution" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -95,7 +96,7 @@ func newCloudFrontLayerHandler(storageDriver storagedriver.StorageDriver, option // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. -func (lh *cloudFrontLayerHandler) Resolve(layer Layer) (http.Handler, error) { +func (lh *cloudFrontLayerHandler) Resolve(layer distribution.Layer) (http.Handler, error) { layerURLStr, err := lh.delegateLayerHandler.urlFor(layer, nil) if err != nil { return nil, err diff --git a/docs/storage/delegatelayerhandler.go b/docs/storage/delegatelayerhandler.go index 01354023..62b08b22 100644 --- a/docs/storage/delegatelayerhandler.go +++ b/docs/storage/delegatelayerhandler.go @@ -5,6 +5,7 @@ import ( "net/http" "time" + "github.com/docker/distribution" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -40,7 +41,7 @@ func newDelegateLayerHandler(storageDriver storagedriver.StorageDriver, options // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. -func (lh *delegateLayerHandler) Resolve(layer Layer) (http.Handler, error) { +func (lh *delegateLayerHandler) Resolve(layer distribution.Layer) (http.Handler, error) { // TODO(bbland): This is just a sanity check to ensure that the // storagedriver supports url generation. It would be nice if we didn't have // to do this twice for non-GET requests. @@ -64,7 +65,7 @@ func (lh *delegateLayerHandler) Resolve(layer Layer) (http.Handler, error) { // urlFor returns a download URL for the given layer, or the empty string if // unsupported. -func (lh *delegateLayerHandler) urlFor(layer Layer, options map[string]interface{}) (string, error) { +func (lh *delegateLayerHandler) urlFor(layer distribution.Layer, options map[string]interface{}) (string, error) { // Crack open the layer to get at the layerStore layerRd, ok := layer.(*layerReader) if !ok { diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go index 4cb2b331..b70b1fb2 100644 --- a/docs/storage/filereader.go +++ b/docs/storage/filereader.go @@ -125,23 +125,8 @@ func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { return fr.offset, err } -// Close the layer. Should be called when the resource is no longer needed. 
func (fr *fileReader) Close() error { - if fr.err != nil { - return fr.err - } - - fr.err = ErrLayerClosed - - // close and release reader chain - if fr.rc != nil { - fr.rc.Close() - } - - fr.rc = nil - fr.brd = nil - - return fr.err + return fr.closeWithErr(fmt.Errorf("fileReader: closed")) } // reader prepares the current reader at the lrs offset, ensuring its buffered @@ -199,3 +184,21 @@ func (fr *fileReader) reset() { fr.rc = nil } } + +func (fr *fileReader) closeWithErr(err error) error { + if fr.err != nil { + return fr.err + } + + fr.err = err + + // close and release reader chain + if fr.rc != nil { + fr.rc.Close() + } + + fr.rc = nil + fr.brd = nil + + return fr.err +} diff --git a/docs/storage/layer.go b/docs/storage/layer.go deleted file mode 100644 index 5e12f43e..00000000 --- a/docs/storage/layer.go +++ /dev/null @@ -1,90 +0,0 @@ -package storage - -import ( - "fmt" - "io" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -// Layer provides a readable and seekable layer object. Typically, -// implementations are *not* goroutine safe. -type Layer interface { - // http.ServeContent requires an efficient implementation of - // ReadSeeker.Seek(0, os.SEEK_END). - io.ReadSeeker - io.Closer - - // Name returns the repository under which this layer is linked. - Name() string // TODO(stevvooe): struggling with nomenclature: should this be "repo" or "name"? - - // Digest returns the unique digest of the blob, which is the tarsum for - // layers. - Digest() digest.Digest - - // CreatedAt returns the time this layer was created. - CreatedAt() time.Time -} - -// LayerUpload provides a handle for working with in-progress uploads. -// Instances can be obtained from the LayerService.Upload and -// LayerService.Resume. -type LayerUpload interface { - io.WriteSeeker - io.ReaderFrom - io.Closer - - // Name of the repository under which the layer will be linked. - Name() string - - // UUID returns the identifier for this upload. - UUID() string - - // StartedAt returns the time this layer upload was started. - StartedAt() time.Time - - // Finish marks the upload as completed, returning a valid handle to the - // uploaded layer. The digest is validated against the contents of the - // uploaded layer. - Finish(digest digest.Digest) (Layer, error) - - // Cancel the layer upload process. - Cancel() error -} - -var ( - // ErrLayerExists returned when layer already exists - ErrLayerExists = fmt.Errorf("layer exists") - - // ErrLayerTarSumVersionUnsupported when tarsum is unsupported version. - ErrLayerTarSumVersionUnsupported = fmt.Errorf("unsupported tarsum version") - - // ErrLayerUploadUnknown returned when upload is not found. - ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown") - - // ErrLayerClosed returned when an operation is attempted on a closed - // Layer or LayerUpload. - ErrLayerClosed = fmt.Errorf("layer closed") -) - -// ErrUnknownLayer returned when layer cannot be found. -type ErrUnknownLayer struct { - FSLayer manifest.FSLayer -} - -func (err ErrUnknownLayer) Error() string { - return fmt.Sprintf("unknown layer %v", err.FSLayer.BlobSum) -} - -// ErrLayerInvalidDigest returned when tarsum check fails. 
-type ErrLayerInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrLayerInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go index 48982993..ec0186db 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/layer_test.go @@ -9,6 +9,7 @@ import ( "os" "testing" + "github.com/docker/distribution" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -53,7 +54,7 @@ func TestSimpleLayerUpload(t *testing.T) { // Do a resume, get unknown upload layerUpload, err = ls.Resume(layerUpload.UUID()) - if err != ErrLayerUploadUnknown { + if err != distribution.ErrLayerUploadUnknown { t.Fatalf("unexpected error resuming upload, should be unkown: %v", err) } @@ -102,7 +103,7 @@ func TestSimpleLayerUpload(t *testing.T) { } // After finishing an upload, it should no longer exist. - if _, err := ls.Resume(layerUpload.UUID()); err != ErrLayerUploadUnknown { + if _, err := ls.Resume(layerUpload.UUID()); err != distribution.ErrLayerUploadUnknown { t.Fatalf("expected layer upload to be unknown, got %v", err) } @@ -165,7 +166,7 @@ func TestSimpleLayerRead(t *testing.T) { } switch err.(type) { - case ErrUnknownLayer: + case distribution.ErrUnknownLayer: err = nil default: t.Fatalf("unexpected error fetching non-existent layer: %v", err) diff --git a/docs/storage/layerhandler.go b/docs/storage/layerhandler.go index 5bcfeddd..b03bc250 100644 --- a/docs/storage/layerhandler.go +++ b/docs/storage/layerhandler.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" + "github.com/docker/distribution" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -13,7 +14,7 @@ type LayerHandler interface { // Layer if possible, or nil and an error when unsupported. This may // directly serve the contents of the layer or issue a redirect to another // URL hosting the content. - Resolve(layer Layer) (http.Handler, error) + Resolve(layer distribution.Layer) (http.Handler, error) } // LayerHandlerInitFunc is the type of a LayerHandler factory function and is diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index 4510dd7d..c539b769 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -3,6 +3,7 @@ package storage import ( "time" + "github.com/docker/distribution" "github.com/docker/distribution/digest" ) @@ -15,7 +16,7 @@ type layerReader struct { digest digest.Digest } -var _ Layer = &layerReader{} +var _ distribution.Layer = &layerReader{} func (lrs *layerReader) Name() string { return lrs.name @@ -28,3 +29,8 @@ func (lrs *layerReader) Digest() digest.Digest { func (lrs *layerReader) CreatedAt() time.Time { return lrs.modtime } + +// Close the layer. Should be called when the resource is no longer needed. 
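The tests updated above exercise the two error styles that the move to the distribution root preserves: package-level sentinel values compared with ==, and struct errors matched by type switch. A compressed sketch with self-contained equivalents of those errors (the names echo the real ones, but these are illustrative copies):

package main

import (
	"errors"
	"fmt"
)

var ErrLayerUploadUnknown = errors.New("layer upload unknown")

type ErrUnknownLayer struct{ BlobSum string }

func (e ErrUnknownLayer) Error() string { return "unknown layer " + e.BlobSum }

func classify(err error) string {
	if err == ErrLayerUploadUnknown { // sentinel: identity comparison
		return "resumable upload not found"
	}
	switch err := err.(type) { // structured: type switch, detail available
	case ErrUnknownLayer:
		return "missing blob " + err.BlobSum
	}
	return "other"
}

func main() {
	fmt.Println(classify(ErrLayerUploadUnknown))
	fmt.Println(classify(ErrUnknownLayer{BlobSum: "sha256:abc"}))
}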
+func (lrs *layerReader) Close() error { + return lrs.closeWithErr(distribution.ErrLayerClosed) +} diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index e4760b4e..1769eb43 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -4,6 +4,7 @@ import ( "time" "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" @@ -23,7 +24,7 @@ func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { if err != nil { switch err.(type) { - case ErrUnknownLayer: + case distribution.ErrUnknownLayer: return false, nil } @@ -33,7 +34,7 @@ func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { return true, nil } -func (ls *layerStore) Fetch(dgst digest.Digest) (Layer, error) { +func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) { ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch") bp, err := ls.path(dgst) if err != nil { @@ -55,7 +56,7 @@ func (ls *layerStore) Fetch(dgst digest.Digest) (Layer, error) { // Upload begins a layer upload, returning a handle. If the layer upload // is already in progress or the layer has already been uploaded, this // will return an error. -func (ls *layerStore) Upload() (LayerUpload, error) { +func (ls *layerStore) Upload() (distribution.LayerUpload, error) { ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload") // NOTE(stevvooe): Consider the issues with allowing concurrent upload of @@ -93,7 +94,7 @@ func (ls *layerStore) Upload() (LayerUpload, error) { // Resume continues an in progress layer upload, returning the current // state of the upload. -func (ls *layerStore) Resume(uuid string) (LayerUpload, error) { +func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) { ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume") startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ name: ls.repository.Name(), @@ -108,7 +109,7 @@ func (ls *layerStore) Resume(uuid string) (LayerUpload, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return nil, ErrLayerUploadUnknown + return nil, distribution.ErrLayerUploadUnknown default: return nil, err } @@ -132,7 +133,7 @@ func (ls *layerStore) Resume(uuid string) (LayerUpload, error) { } // newLayerUpload allocates a new upload controller with the given state. 
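The closeWithErr refactor above lets the generic fileReader record a sticky error on close while each wrapper chooses the domain error that later calls should report. A skeletal version of the idea, with minimal stand-ins; as in the original, the close sentinel is returned even on the first Close:

package main

import (
	"errors"
	"fmt"
	"io"
)

var ErrLayerClosed = errors.New("layer closed")

type fileReader struct {
	rc  io.ReadCloser
	err error // sticky: set once on close, returned thereafter
}

func (fr *fileReader) closeWithErr(err error) error {
	if fr.err != nil {
		return fr.err // a double close reports the original error
	}
	fr.err = err
	// close and release the reader chain
	if fr.rc != nil {
		fr.rc.Close()
	}
	fr.rc = nil
	return fr.err
}

type layerReader struct{ fileReader }

func (lr *layerReader) Close() error { return lr.closeWithErr(ErrLayerClosed) }

func main() {
	lr := &layerReader{}
	fmt.Println(lr.Close()) // layer closed
	fmt.Println(lr.Close()) // layer closed (sticky)
}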
-func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (LayerUpload, error) { +func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) { fw, err := newFileWriter(ls.repository.driver, path) if err != nil { return nil, err @@ -158,7 +159,9 @@ func (ls *layerStore) path(dgst digest.Digest) (string, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return "", ErrUnknownLayer{manifest.FSLayer{BlobSum: dgst}} + return "", distribution.ErrUnknownLayer{ + FSLayer: manifest.FSLayer{BlobSum: dgst}, + } default: return "", err } diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go index e6cf21a9..fe3a0721 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerupload.go @@ -7,6 +7,7 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" @@ -24,7 +25,7 @@ type layerUploadController struct { fileWriter } -var _ LayerUpload = &layerUploadController{} +var _ distribution.LayerUpload = &layerUploadController{} // Name of the repository under which the layer will be linked. func (luc *layerUploadController) Name() string { @@ -44,7 +45,7 @@ func (luc *layerUploadController) StartedAt() time.Time { // uploaded layer. The final size and checksum are validated against the // contents of the uploaded layer. The checksum should be provided in the // format :. -func (luc *layerUploadController) Finish(digest digest.Digest) (Layer, error) { +func (luc *layerUploadController) Finish(digest digest.Digest) (distribution.Layer, error) { ctxu.GetLogger(luc.layerStore.repository.ctx).Debug("(*layerUploadController).Finish") canonical, err := luc.validateLayer(digest) if err != nil { @@ -93,9 +94,9 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige case tarsum.Version1: default: // version 0 and dev, for now. 
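The layerStore.path hunk above shows the translation at the storage boundary: a driver-level "path not found" becomes the domain-level ErrUnknownLayer, so callers never see storage internals. A tiny sketch of that boundary, with simplified stand-in types:

package main

import "fmt"

type PathNotFoundError struct{ Path string }

func (e PathNotFoundError) Error() string { return "path not found: " + e.Path }

type ErrUnknownLayer struct{ BlobSum string }

func (e ErrUnknownLayer) Error() string { return "unknown layer " + e.BlobSum }

func blobPath(dgst string) (string, error) {
	// Pretend the driver failed to stat the blob's path.
	var derr error = PathNotFoundError{Path: "/registry/blobs/" + dgst}
	switch derr.(type) {
	case PathNotFoundError:
		// Infrastructure error becomes a domain error here.
		return "", ErrUnknownLayer{BlobSum: dgst}
	default:
		return "", derr
	}
}

func main() {
	if _, err := blobPath("sha256:abc"); err != nil {
		fmt.Println(err) // unknown layer sha256:abc
	}
}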
- return "", ErrLayerInvalidDigest{ + return "", distribution.ErrLayerInvalidDigest{ Digest: dgst, - Reason: ErrLayerTarSumVersionUnsupported, + Reason: distribution.ErrLayerTarSumVersionUnsupported, } } @@ -124,7 +125,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige } if !digestVerifier.Verified() { - return "", ErrLayerInvalidDigest{ + return "", distribution.ErrLayerInvalidDigest{ Digest: dgst, Reason: fmt.Errorf("content does not match digest"), } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 1f798dde..99802905 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" @@ -71,7 +72,7 @@ type manifestStore struct { tagStore *tagStore } -var _ ManifestService = &manifestStore{} +var _ distribution.ManifestService = &manifestStore{} // func (ms *manifestStore) Repository() Repository { // return ms.repository @@ -177,7 +178,7 @@ func (ms *manifestStore) verifyManifest(tag string, mnfst *manifest.SignedManife } if !exists { - errs = append(errs, ErrUnknownLayer{FSLayer: fsLayer}) + errs = append(errs, distribution.ErrUnknownLayer{FSLayer: fsLayer}) } } diff --git a/docs/storage/notifications/bridge.go b/docs/storage/notifications/bridge.go index 217ee5bd..9954e7c7 100644 --- a/docs/storage/notifications/bridge.go +++ b/docs/storage/notifications/bridge.go @@ -4,11 +4,10 @@ import ( "net/http" "time" - "github.com/docker/distribution/manifest" - "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/manifest" ) type bridge struct { @@ -53,31 +52,31 @@ func NewRequestRecord(id string, r *http.Request) RequestRecord { } } -func (b *bridge) ManifestPushed(repo storage.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { return b.createManifestEventAndWrite(EventActionPush, repo, sm) } -func (b *bridge) ManifestPulled(repo storage.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { return b.createManifestEventAndWrite(EventActionPull, repo, sm) } -func (b *bridge) ManifestDeleted(repo storage.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { return b.createManifestEventAndWrite(EventActionDelete, repo, sm) } -func (b *bridge) LayerPushed(repo storage.Repository, layer storage.Layer) error { +func (b *bridge) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { return b.createLayerEventAndWrite(EventActionPush, repo, layer.Digest()) } -func (b *bridge) LayerPulled(repo storage.Repository, layer storage.Layer) error { +func (b *bridge) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { return b.createLayerEventAndWrite(EventActionPull, repo, layer.Digest()) } -func (b *bridge) LayerDeleted(repo storage.Repository, layer storage.Layer) error { +func (b *bridge) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { return b.createLayerEventAndWrite(EventActionDelete, repo, layer.Digest()) } -func (b *bridge) 
createManifestEventAndWrite(action string, repo storage.Repository, sm *manifest.SignedManifest) error { +func (b *bridge) createManifestEventAndWrite(action string, repo distribution.Repository, sm *manifest.SignedManifest) error { event, err := b.createManifestEvent(action, repo, sm) if err != nil { return err @@ -86,7 +85,7 @@ func (b *bridge) createManifestEventAndWrite(action string, repo storage.Reposit return b.sink.Write(*event) } -func (b *bridge) createManifestEvent(action string, repo storage.Repository, sm *manifest.SignedManifest) (*Event, error) { +func (b *bridge) createManifestEvent(action string, repo distribution.Repository, sm *manifest.SignedManifest) (*Event, error) { event := b.createEvent(action) event.Target.Type = EventTargetTypeManifest event.Target.Name = repo.Name() @@ -112,7 +111,7 @@ func (b *bridge) createManifestEvent(action string, repo storage.Repository, sm return event, nil } -func (b *bridge) createLayerEventAndWrite(action string, repo storage.Repository, dgst digest.Digest) error { +func (b *bridge) createLayerEventAndWrite(action string, repo distribution.Repository, dgst digest.Digest) error { event, err := b.createLayerEvent(action, repo, dgst) if err != nil { return err @@ -121,7 +120,7 @@ func (b *bridge) createLayerEventAndWrite(action string, repo storage.Repository return b.sink.Write(*event) } -func (b *bridge) createLayerEvent(action string, repo storage.Repository, dgst digest.Digest) (*Event, error) { +func (b *bridge) createLayerEvent(action string, repo distribution.Repository, dgst digest.Digest) (*Event, error) { event := b.createEvent(action) event.Target.Type = EventTargetTypeBlob event.Target.Name = repo.Name() diff --git a/docs/storage/notifications/listener.go b/docs/storage/notifications/listener.go index 99a06f02..b55fe326 100644 --- a/docs/storage/notifications/listener.go +++ b/docs/storage/notifications/listener.go @@ -2,31 +2,31 @@ package notifications import ( "github.com/Sirupsen/logrus" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/storage" ) // ManifestListener describes a set of methods for listening to events related to manifests. type ManifestListener interface { - ManifestPushed(repo storage.Repository, sm *manifest.SignedManifest) error - ManifestPulled(repo storage.Repository, sm *manifest.SignedManifest) error + ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error + ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error // TODO(stevvooe): Please note that delete support is still a little shaky // and we'll need to propagate these in the future. - ManifestDeleted(repo storage.Repository, sm *manifest.SignedManifest) error + ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error } // LayerListener describes a listener that can respond to layer related events. type LayerListener interface { - LayerPushed(repo storage.Repository, layer storage.Layer) error - LayerPulled(repo storage.Repository, layer storage.Layer) error + LayerPushed(repo distribution.Repository, layer distribution.Layer) error + LayerPulled(repo distribution.Repository, layer distribution.Layer) error // TODO(stevvooe): Please note that delete support is still a little shaky // and we'll need to propagate these in the future. 
- LayerDeleted(repo storage.Repository, layer storage.Layer) error + LayerDeleted(repo distribution.Repository, layer distribution.Layer) error } // Listener combines all repository events into a single interface. @@ -36,26 +36,26 @@ type Listener interface { } type repositoryListener struct { - storage.Repository + distribution.Repository listener Listener } // Listen dispatches events on the repository to the listener. -func Listen(repo storage.Repository, listener Listener) storage.Repository { +func Listen(repo distribution.Repository, listener Listener) distribution.Repository { return &repositoryListener{ Repository: repo, listener: listener, } } -func (rl *repositoryListener) Manifests() storage.ManifestService { +func (rl *repositoryListener) Manifests() distribution.ManifestService { return &manifestServiceListener{ ManifestService: rl.Repository.Manifests(), parent: rl, } } -func (rl *repositoryListener) Layers() storage.LayerService { +func (rl *repositoryListener) Layers() distribution.LayerService { return &layerServiceListener{ LayerService: rl.Repository.Layers(), parent: rl, @@ -63,7 +63,7 @@ func (rl *repositoryListener) Layers() storage.LayerService { } type manifestServiceListener struct { - storage.ManifestService + distribution.ManifestService parent *repositoryListener } @@ -91,11 +91,11 @@ func (msl *manifestServiceListener) Put(tag string, sm *manifest.SignedManifest) } type layerServiceListener struct { - storage.LayerService + distribution.LayerService parent *repositoryListener } -func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (storage.Layer, error) { +func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (distribution.Layer, error) { layer, err := lsl.LayerService.Fetch(dgst) if err == nil { if err := lsl.parent.listener.LayerPulled(lsl.parent.Repository, layer); err != nil { @@ -106,17 +106,17 @@ func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (storage.Layer, error return layer, err } -func (lsl *layerServiceListener) Upload() (storage.LayerUpload, error) { +func (lsl *layerServiceListener) Upload() (distribution.LayerUpload, error) { lu, err := lsl.LayerService.Upload() return lsl.decorateUpload(lu), err } -func (lsl *layerServiceListener) Resume(uuid string) (storage.LayerUpload, error) { +func (lsl *layerServiceListener) Resume(uuid string) (distribution.LayerUpload, error) { lu, err := lsl.LayerService.Resume(uuid) return lsl.decorateUpload(lu), err } -func (lsl *layerServiceListener) decorateUpload(lu storage.LayerUpload) storage.LayerUpload { +func (lsl *layerServiceListener) decorateUpload(lu distribution.LayerUpload) distribution.LayerUpload { return &layerUploadListener{ LayerUpload: lu, parent: lsl, @@ -124,11 +124,11 @@ func (lsl *layerServiceListener) decorateUpload(lu storage.LayerUpload) storage. 
} type layerUploadListener struct { - storage.LayerUpload + distribution.LayerUpload parent *layerServiceListener } -func (lul *layerUploadListener) Finish(dgst digest.Digest) (storage.Layer, error) { +func (lul *layerUploadListener) Finish(dgst digest.Digest) (distribution.Layer, error) { layer, err := lul.LayerUpload.Finish(dgst) if err == nil { if err := lul.parent.parent.listener.LayerPushed(lul.parent.parent.Repository, layer); err != nil { diff --git a/docs/storage/notifications/listener_test.go b/docs/storage/notifications/listener_test.go index b8d6a31e..0f91a6a3 100644 --- a/docs/storage/notifications/listener_test.go +++ b/docs/storage/notifications/listener_test.go @@ -5,6 +5,7 @@ import ( "reflect" "testing" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/storage" @@ -44,40 +45,40 @@ type testListener struct { ops map[string]int } -func (tl *testListener) ManifestPushed(repo storage.Repository, sm *manifest.SignedManifest) error { +func (tl *testListener) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { tl.ops["manifest:push"]++ return nil } -func (tl *testListener) ManifestPulled(repo storage.Repository, sm *manifest.SignedManifest) error { +func (tl *testListener) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { tl.ops["manifest:pull"]++ return nil } -func (tl *testListener) ManifestDeleted(repo storage.Repository, sm *manifest.SignedManifest) error { +func (tl *testListener) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { tl.ops["manifest:delete"]++ return nil } -func (tl *testListener) LayerPushed(repo storage.Repository, layer storage.Layer) error { +func (tl *testListener) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { tl.ops["layer:push"]++ return nil } -func (tl *testListener) LayerPulled(repo storage.Repository, layer storage.Layer) error { +func (tl *testListener) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { tl.ops["layer:pull"]++ return nil } -func (tl *testListener) LayerDeleted(repo storage.Repository, layer storage.Layer) error { +func (tl *testListener) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { tl.ops["layer:delete"]++ return nil } // checkExerciseRegistry takes the registry through all of its operations, // carrying out generic checks. -func checkExerciseRepository(t *testing.T, repository storage.Repository) { +func checkExerciseRepository(t *testing.T, repository distribution.Repository) { // TODO(stevvooe): This would be a nice testutil function. Basically, it // takes the registry through a common set of operations. This could be // used to make cross-cutting updates by changing internals that affect diff --git a/docs/storage/registry.go b/docs/storage/registry.go index c4ca2f80..2983751a 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -1,6 +1,7 @@ package storage import ( + "github.com/docker/distribution" storagedriver "github.com/docker/distribution/registry/storage/driver" "golang.org/x/net/context" ) @@ -16,7 +17,7 @@ type registry struct { // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate. 
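The listener types above lean on Go's interface embedding to decorate: the wrapper satisfies the whole Repository interface for free and overrides only the methods it wants to observe. A reduced sketch of that shape, with cut-down stand-ins for the distribution interfaces and a callback in place of the Listener:

package main

import "fmt"

type LayerService interface {
	Exists(dgst string) (bool, error)
}

type Repository interface {
	Name() string
	Layers() LayerService
}

type repositoryListener struct {
	Repository // embedded: Name() and anything not overridden is promoted
	onAccess   func(string)
}

func (rl *repositoryListener) Layers() LayerService {
	return &layerServiceListener{LayerService: rl.Repository.Layers(), rl: rl}
}

type layerServiceListener struct {
	LayerService
	rl *repositoryListener
}

func (lsl *layerServiceListener) Exists(dgst string) (bool, error) {
	lsl.rl.onAccess(dgst) // observe, then delegate to the wrapped service
	return lsl.LayerService.Exists(dgst)
}

type fakeRepo struct{}

func (fakeRepo) Name() string         { return "library/test" }
func (fakeRepo) Layers() LayerService { return fakeLayers{} }

type fakeLayers struct{}

func (fakeLayers) Exists(string) (bool, error) { return true, nil }

func main() {
	repo := &repositoryListener{
		Repository: fakeRepo{},
		onAccess:   func(dgst string) { fmt.Println("access:", dgst) },
	}
	repo.Layers().Exists("sha256:abc") // access: sha256:abc
	fmt.Println(repo.Name())           // library/test, promoted unchanged
}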
-func NewRegistryWithDriver(driver storagedriver.StorageDriver) Registry { +func NewRegistryWithDriver(driver storagedriver.StorageDriver) distribution.Registry { bs := &blobStore{} reg := ®istry{ @@ -35,7 +36,7 @@ func NewRegistryWithDriver(driver storagedriver.StorageDriver) Registry { // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, name string) Repository { +func (reg *registry) Repository(ctx context.Context, name string) distribution.Repository { return &repository{ ctx: ctx, registry: reg, @@ -58,7 +59,7 @@ func (repo *repository) Name() string { // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. -func (repo *repository) Manifests() ManifestService { +func (repo *repository) Manifests() distribution.ManifestService { return &manifestStore{ repository: repo, revisionStore: &revisionStore{ @@ -73,7 +74,7 @@ func (repo *repository) Manifests() ManifestService { // Layers returns an instance of the LayerService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. -func (repo *repository) Layers() LayerService { +func (repo *repository) Layers() distribution.LayerService { return &layerStore{ repository: repo, } diff --git a/docs/storage/services.go b/docs/storage/services.go deleted file mode 100644 index 7e6ac476..00000000 --- a/docs/storage/services.go +++ /dev/null @@ -1,84 +0,0 @@ -package storage - -import ( - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "golang.org/x/net/context" -) - -// TODO(stevvooe): These types need to be moved out of the storage package. - -// Registry represents a collection of repositories, addressable by name. -type Registry interface { - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name string) Repository -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Name returns the name of the repository. - Name() string - - // Manifests returns a reference to this repository's manifest service. - Manifests() ManifestService - - // Layers returns a reference to this repository's layers service. - Layers() LayerService -} - -// ManifestService provides operations on image manifests. -type ManifestService interface { - // Tags lists the tags under the named repository. - Tags() ([]string, error) - - // Exists returns true if the manifest exists. - Exists(tag string) (bool, error) - - // Get retrieves the named manifest, if it exists. - Get(tag string) (*manifest.SignedManifest, error) - - // Put creates or updates the named manifest. - // Put(tag string, manifest *manifest.SignedManifest) (digest.Digest, error) - Put(tag string, manifest *manifest.SignedManifest) error - - // Delete removes the named manifest, if it exists. - Delete(tag string) error - - // TODO(stevvooe): There are several changes that need to be done to this - // interface: - // - // 1. Get(tag string) should be GetByTag(tag string) - // 2. Put(tag string, manifest *manifest.SignedManifest) should be - // Put(manifest *manifest.SignedManifest). 
The method can read the - // tag on manifest to automatically tag it in the repository. - // 3. Need a GetByDigest(dgst digest.Digest) method. - // 4. Allow explicit tagging with Tag(digest digest.Digest, tag string) - // 5. Support reading tags with a re-entrant reader to avoid large - // allocations in the registry. - // 6. Long-term: Provide All() method that lets one scroll through all of - // the manifest entries. - // 7. Long-term: break out concept of signing from manifests. This is - // really a part of the distribution sprint. - // 8. Long-term: Manifest should be an interface. This code shouldn't - // really be concerned with the storage format. -} - -// LayerService provides operations on layer files in a backend storage. -type LayerService interface { - // Exists returns true if the layer exists. - Exists(digest digest.Digest) (bool, error) - - // Fetch the layer identifed by TarSum. - Fetch(digest digest.Digest) (Layer, error) - - // Upload begins a layer upload to repository identified by name, - // returning a handle. - Upload() (LayerUpload, error) - - // Resume continues an in progress layer upload, returning a handle to the - // upload. The caller should seek to the latest desired upload location - // before proceeding. - Resume(uuid string) (LayerUpload, error) -} From 09bf7522347066980c75c5b84b0836cb0c581dcf Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 11 Feb 2015 17:00:42 -0800 Subject: [PATCH 027/501] Remove Name from Layer and LayerUpload interface A Layer or LayerUpload should not be coupled with the containing repository. Remove the Name method and correctly reference from the repository where appropriate. Signed-off-by: Stephen J Day --- docs/handlers/layerupload.go | 4 ++-- docs/storage/layerreader.go | 5 ----- docs/storage/layerstore.go | 1 - docs/storage/layerupload.go | 9 ++------- 4 files changed, 4 insertions(+), 15 deletions(-) diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index fa854449..63a9e776 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -215,7 +215,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * } // Build our canonical layer url - layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest()) + layerURL, err := luh.urlBuilder.BuildBlobURL(luh.Repository.Name(), layer.Digest()) if err != nil { luh.Errors.Push(v2.ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) @@ -268,7 +268,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt } uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL( - luh.Upload.Name(), luh.Upload.UUID(), + luh.Repository.Name(), luh.Upload.UUID(), url.Values{ "_state": []string{token}, }) diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index c539b769..2d8e588d 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -12,16 +12,11 @@ import ( type layerReader struct { fileReader - name string // repo name of this layer digest digest.Digest } var _ distribution.Layer = &layerReader{} -func (lrs *layerReader) Name() string { - return lrs.name -} - func (lrs *layerReader) Digest() digest.Digest { return lrs.digest } diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index 1769eb43..153e42a8 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -48,7 +48,6 @@ func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) { return &layerReader{ fileReader: *fr, - name: 
ls.repository.Name(), digest: dgst, }, nil } diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go index fe3a0721..369a9bd5 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerupload.go @@ -27,11 +27,6 @@ type layerUploadController struct { var _ distribution.LayerUpload = &layerUploadController{} -// Name of the repository under which the layer will be linked. -func (luc *layerUploadController) Name() string { - return luc.layerStore.repository.Name() -} - // UUID returns the identifier for this upload. func (luc *layerUploadController) UUID() string { return luc.uuid @@ -194,7 +189,7 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { // named repository for the upload controller. func (luc *layerUploadController) linkLayer(digest digest.Digest) error { layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{ - name: luc.Name(), + name: luc.layerStore.repository.Name(), digest: digest, }) @@ -210,7 +205,7 @@ func (luc *layerUploadController) linkLayer(digest digest.Digest) error { // resources are already not present, no error will be returned. func (luc *layerUploadController) removeResources() error { dataPath, err := luc.layerStore.repository.registry.pm.path(uploadDataPathSpec{ - name: luc.Name(), + name: luc.layerStore.repository.Name(), uuid: luc.uuid, }) From ed8827c3c2de71c0decc615701202b0c8761e9a8 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 11 Feb 2015 17:08:08 -0800 Subject: [PATCH 028/501] Move notifications package to distribution Since the notifications package is now decoupled from storage, we are moving it to the root package. Signed-off-by: Stephen J Day --- docs/handlers/app.go | 1 - docs/storage/notifications/bridge.go | 155 --------- docs/storage/notifications/endpoint.go | 86 ----- docs/storage/notifications/event.go | 154 --------- docs/storage/notifications/event_test.go | 145 --------- docs/storage/notifications/http.go | 145 --------- docs/storage/notifications/http_test.go | 155 --------- docs/storage/notifications/listener.go | 140 -------- docs/storage/notifications/listener_test.go | 154 --------- docs/storage/notifications/metrics.go | 152 --------- docs/storage/notifications/sinks.go | 337 -------------------- docs/storage/notifications/sinks_test.go | 223 ------------- 12 files changed, 1847 deletions(-) delete mode 100644 docs/storage/notifications/bridge.go delete mode 100644 docs/storage/notifications/endpoint.go delete mode 100644 docs/storage/notifications/event.go delete mode 100644 docs/storage/notifications/event_test.go delete mode 100644 docs/storage/notifications/http.go delete mode 100644 docs/storage/notifications/http_test.go delete mode 100644 docs/storage/notifications/listener.go delete mode 100644 docs/storage/notifications/listener_test.go delete mode 100644 docs/storage/notifications/metrics.go delete mode 100644 docs/storage/notifications/sinks.go delete mode 100644 docs/storage/notifications/sinks_test.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index bc0c46aa..3a9f46a7 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -16,7 +16,6 @@ import ( "github.com/docker/distribution/registry/storage" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/registry/storage/notifications" "github.com/gorilla/mux" "golang.org/x/net/context" ) diff --git a/docs/storage/notifications/bridge.go 
b/docs/storage/notifications/bridge.go deleted file mode 100644 index 9954e7c7..00000000 --- a/docs/storage/notifications/bridge.go +++ /dev/null @@ -1,155 +0,0 @@ -package notifications - -import ( - "net/http" - "time" - - "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -type bridge struct { - ub URLBuilder - actor ActorRecord - source SourceRecord - request RequestRecord - sink Sink -} - -var _ Listener = &bridge{} - -// URLBuilder defines a subset of url builder to be used by the event listener. -type URLBuilder interface { - BuildManifestURL(name, tag string) (string, error) - BuildBlobURL(name string, dgst digest.Digest) (string, error) -} - -// NewBridge returns a notification listener that writes records to sink, -// using the actor and source. Any urls populated in the events created by -// this bridge will be created using the URLBuilder. -// TODO(stevvooe): Update this to simply take a context.Context object. -func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { - return &bridge{ - ub: ub, - actor: actor, - source: source, - request: request, - sink: sink, - } -} - -// NewRequestRecord builds a RequestRecord for use in NewBridge from an -// http.Request, associating it with a request id. -func NewRequestRecord(id string, r *http.Request) RequestRecord { - return RequestRecord{ - ID: id, - Addr: r.RemoteAddr, - Host: r.Host, - Method: r.Method, - UserAgent: r.UserAgent(), - } -} - -func (b *bridge) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPush, repo, sm) -} - -func (b *bridge) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPull, repo, sm) -} - -func (b *bridge) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionDelete, repo, sm) -} - -func (b *bridge) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionPush, repo, layer.Digest()) -} - -func (b *bridge) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionPull, repo, layer.Digest()) -} - -func (b *bridge) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { - return b.createLayerEventAndWrite(EventActionDelete, repo, layer.Digest()) -} - -func (b *bridge) createManifestEventAndWrite(action string, repo distribution.Repository, sm *manifest.SignedManifest) error { - event, err := b.createManifestEvent(action, repo, sm) - if err != nil { - return err - } - - return b.sink.Write(*event) -} - -func (b *bridge) createManifestEvent(action string, repo distribution.Repository, sm *manifest.SignedManifest) (*Event, error) { - event := b.createEvent(action) - event.Target.Type = EventTargetTypeManifest - event.Target.Name = repo.Name() - event.Target.Tag = sm.Tag - - p, err := sm.Payload() - if err != nil { - return nil, err - } - - event.Target.Digest, err = digest.FromBytes(p) - if err != nil { - return nil, err - } - - // TODO(stevvooe): Currently, the is the "tag" url: once the digest url is - // implemented, this should be replaced. 
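Note that the deleted bridge depends on only a two-method URLBuilder rather than the full v2 builder, which keeps the notification code decoupled and easy to test. A compressed sketch of the same narrowing, with a trivial fake builder and a cut-down event; the names here are illustrative:

package main

import "fmt"

// URLBuilder is the narrow interface the bridge needs.
type URLBuilder interface {
	BuildBlobURL(name, dgst string) (string, error)
}

type fakeBuilder struct{ base string }

func (b fakeBuilder) BuildBlobURL(name, dgst string) (string, error) {
	return fmt.Sprintf("%s/v2/%s/blobs/%s", b.base, name, dgst), nil
}

type Event struct {
	Action, Name, Digest, URL string
}

func createLayerEvent(ub URLBuilder, action, repo, dgst string) (*Event, error) {
	event := &Event{Action: action, Name: repo, Digest: dgst}
	var err error
	event.URL, err = ub.BuildBlobURL(repo, dgst)
	if err != nil {
		return nil, err
	}
	return event, nil
}

func main() {
	ev, _ := createLayerEvent(fakeBuilder{base: "http://registry.local"},
		"push", "library/test", "sha256:abc")
	fmt.Printf("%+v\n", ev)
}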
- event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, sm.Tag) - if err != nil { - return nil, err - } - - return event, nil -} - -func (b *bridge) createLayerEventAndWrite(action string, repo distribution.Repository, dgst digest.Digest) error { - event, err := b.createLayerEvent(action, repo, dgst) - if err != nil { - return err - } - - return b.sink.Write(*event) -} - -func (b *bridge) createLayerEvent(action string, repo distribution.Repository, dgst digest.Digest) (*Event, error) { - event := b.createEvent(action) - event.Target.Type = EventTargetTypeBlob - event.Target.Name = repo.Name() - event.Target.Digest = dgst - - var err error - event.Target.URL, err = b.ub.BuildBlobURL(repo.Name(), dgst) - if err != nil { - return nil, err - } - - return event, nil -} - -// createEvent creates an event with actor and source populated. -func (b *bridge) createEvent(action string) *Event { - event := createEvent(action) - event.Source = b.source - event.Actor = b.actor - event.Request = b.request - - return event -} - -// createEvent returns a new event, timestamped, with the specified action. -func createEvent(action string) *Event { - return &Event{ - ID: uuid.New(), - Timestamp: time.Now(), - Action: action, - } -} diff --git a/docs/storage/notifications/endpoint.go b/docs/storage/notifications/endpoint.go deleted file mode 100644 index dfdb111c..00000000 --- a/docs/storage/notifications/endpoint.go +++ /dev/null @@ -1,86 +0,0 @@ -package notifications - -import ( - "net/http" - "time" -) - -// EndpointConfig covers the optional configuration parameters for an active -// endpoint. -type EndpointConfig struct { - Headers http.Header - Timeout time.Duration - Threshold int - Backoff time.Duration -} - -// defaults set any zero-valued fields to a reasonable default. -func (ec *EndpointConfig) defaults() { - if ec.Timeout <= 0 { - ec.Timeout = time.Second - } - - if ec.Threshold <= 0 { - ec.Threshold = 10 - } - - if ec.Backoff <= 0 { - ec.Backoff = time.Second - } -} - -// Endpoint is a reliable, queued, thread-safe sink that notify external http -// services when events are written. Writes are non-blocking and always -// succeed for callers but events may be queued internally. -type Endpoint struct { - Sink - url string - name string - - EndpointConfig - - metrics *safeMetrics -} - -// NewEndpoint returns a running endpoint, ready to receive events. -func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { - var endpoint Endpoint - endpoint.name = name - endpoint.url = url - endpoint.EndpointConfig = config - endpoint.defaults() - endpoint.metrics = newSafeMetrics() - - // Configures the inmemory queue, retry, http pipeline. - endpoint.Sink = newHTTPSink( - endpoint.url, endpoint.Timeout, endpoint.Headers, - endpoint.metrics.httpStatusListener()) - endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) - endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) - - register(&endpoint) - return &endpoint -} - -// Name returns the name of the endpoint, generally used for debugging. -func (e *Endpoint) Name() string { - return e.name -} - -// URL returns the url of the endpoint. -func (e *Endpoint) URL() string { - return e.url -} - -// ReadMetrics populates em with metrics from the endpoint. -func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { - e.metrics.Lock() - defer e.metrics.Unlock() - - *em = e.metrics.EndpointMetrics - // Map still need to copied in a threadsafe manner. 
- em.Statuses = make(map[string]int) - for k, v := range e.metrics.Statuses { - em.Statuses[k] = v - } -} diff --git a/docs/storage/notifications/event.go b/docs/storage/notifications/event.go deleted file mode 100644 index c23766fa..00000000 --- a/docs/storage/notifications/event.go +++ /dev/null @@ -1,154 +0,0 @@ -package notifications - -import ( - "fmt" - "time" - - "github.com/docker/distribution/digest" -) - -// EventAction constants used in action field of Event. -const ( - EventActionPull = "pull" - EventActionPush = "push" - EventActionDelete = "delete" -) - -// EventTargetType constants used in Target section of Event. -const ( - EventTargetTypeManifest = "manifest" - EventTargetTypeBlob = "blob" -) - -// EventsMediaType is the mediatype for the json event envelope. If the Event, -// ActorRecord, SourceRecord or Envelope structs change, the version number -// should be incremented. -const EventsMediaType = "application/vnd.docker.distribution.events.v1+json" - -// Envelope defines the fields of a json event envelope message that can hold -// one or more events. -type Envelope struct { - // Events make up the contents of the envelope. Events present in a single - // envelope are not necessarily related. - Events []Event `json:"events,omitempty"` -} - -// TODO(stevvooe): The event type should be separate from the json format. It -// should be defined as an interface. Leaving as is for now since we don't -// need that at this time. If we make this change, the struct below would be -// called "EventRecord". - -// Event provides the fields required to describe a registry event. -type Event struct { - // ID provides a unique identifier for the event. - ID string `json:"id,omitempty"` - - // Timestamp is the time at which the event occurred. - Timestamp time.Time `json:"timestamp,omitempty"` - - // Action indicates what action encompasses the provided event. - Action string `json:"action,omitempty"` - - // Target uniquely describes the target of the event. - Target struct { - // Type should be "manifest" or "blob" - Type string `json:"type,omitempty"` - - // Name identifies the named repository. - Name string `json:"name,omitempty"` - - // Digest should identify the object in the repository. - Digest digest.Digest `json:"digest,omitempty"` - - // Tag is present if the operation involved a tagged manifest. - Tag string `json:"tag,omitempty"` - - // URL provides a link to the content on the relevant repository instance. - URL string `json:"url,omitempty"` - } `json:"target,omitempty"` - - // Request covers the request that generated the event. - Request RequestRecord `json:"request,omitempty"` - - // Actor specifies the agent that initiated the event. For most - // situations, this could be from the authorizaton context of the request. - Actor ActorRecord `json:"actor,omitempty"` - - // Source identifies the registry node that generated the event. Put - // differently, while the actor "initiates" the event, the source - // "generates" it. - Source SourceRecord `json:"source,omitempty"` -} - -// ActorRecord specifies the agent that initiated the event. For most -// situations, this could be from the authorizaton context of the request. -// Data in this record can refer to both the initiating client and the -// generating request. -type ActorRecord struct { - // Name corresponds to the subject or username associated with the - // request context that generated the event. 
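The envelope format above leans on `omitempty` so that sparse events stay small on the wire, and ships under a versioned media type so consumers can detect format changes. A tiny demonstration with a cut-down event shape; only EventsMediaType is taken from the source, the rest is illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

const EventsMediaType = "application/vnd.docker.distribution.events.v1+json"

type Event struct {
	ID     string `json:"id,omitempty"`
	Action string `json:"action,omitempty"`
	Tag    string `json:"tag,omitempty"` // omitted for untagged blob events
}

type Envelope struct {
	Events []Event `json:"events,omitempty"`
}

func main() {
	env := Envelope{Events: []Event{{ID: "asdf", Action: "push"}}}
	p, err := json.MarshalIndent(env, "", "   ")
	if err != nil {
		panic(err)
	}
	fmt.Println(EventsMediaType)
	fmt.Println(string(p)) // no "tag" key: empty fields are dropped
}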
- Name string `json:"name,omitempty"` - - // TODO(stevvooe): Look into setting a session cookie to get this - // without docker daemon. - // SessionID - - // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and - // get the actual command. - // Command -} - -// RequestRecord covers the request that generated the event. -type RequestRecord struct { - // ID uniquely identifies the request that initiated the event. - ID string `json:"id"` - - // Addr contains the ip or hostname and possibly port of the client - // connection that initiated the event. This is the RemoteAddr from - // the standard http request. - Addr string `json:"addr,omitempty"` - - // Host is the externally accessible host name of the registry instance, - // as specified by the http host header on incoming requests. - Host string `json:"host,omitempty"` - - // Method has the request method that generated the event. - Method string `json:"method"` - - // UserAgent contains the user agent header of the request. - UserAgent string `json:"useragent"` -} - -// SourceRecord identifies the registry node that generated the event. Put -// differently, while the actor "initiates" the event, the source "generates" -// it. -type SourceRecord struct { - // Addr contains the ip or hostname and the port of the registry node - // that generated the event. Generally, this will be resolved by - // os.Hostname() along with the running port. - Addr string `json:"addr,omitempty"` - - // InstanceID identifies a running instance of an application. Changes - // after each restart. - InstanceID string `json:"instanceID,omitempty"` -} - -var ( - // ErrSinkClosed is returned if a write is issued to a sink that has been - // closed. If encountered, the error should be considered terminal and - // retries will not be successful. - ErrSinkClosed = fmt.Errorf("sink: closed") -) - -// Sink accepts and sends events. -type Sink interface { - // Write writes one or more events to the sink. If no error is returned, - // the caller will assume that all events have been committed and will not - // try to send them again. If an error is received, the caller may retry - // sending the event. The caller should cede the slice of memory to the - // sink and not modify it after calling this method. - Write(events ...Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} diff --git a/docs/storage/notifications/event_test.go b/docs/storage/notifications/event_test.go deleted file mode 100644 index cc2180ac..00000000 --- a/docs/storage/notifications/event_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package notifications - -import ( - "encoding/json" - "strings" - "testing" - "time" -) - -// TestEventJSONFormat provides silly test to detect if the event format or -// envelope has changed. If this code fails, the revision of the protocol may -// need to be incremented. 
-func TestEventEnvelopeJSONFormat(t *testing.T) { - var expected = strings.TrimSpace(` -{ - "events": [ - { - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "type": "manifest", - "name": "library/test", - "digest": "sha256:0123456789abcdef0", - "tag": "latest", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-1", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "type": "blob", - "name": "library/test", - "digest": "tarsum.v2+sha256:0123456789abcdef1", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-2", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "type": "blob", - "name": "library/test", - "digest": "tarsum.v2+sha256:0123456789abcdef2", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - } - ] -} - `) - - tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - t.Fatalf("error creating time: %v", err) - } - - var prototype Event - prototype.Action = "push" - prototype.Timestamp = tm - prototype.Actor.Name = "test-actor" - prototype.Request.ID = "asdfasdf" - prototype.Request.Addr = "client.local" - prototype.Request.Host = "registrycluster.local" - prototype.Request.Method = "PUT" - prototype.Request.UserAgent = "test/0.1" - prototype.Source.Addr = "hostname.local:port" - - var manifestPush Event - manifestPush = prototype - manifestPush.ID = "asdf-asdf-asdf-asdf-0" - manifestPush.Target.Digest = "sha256:0123456789abcdef0" - manifestPush.Target.Type = EventTargetTypeManifest - manifestPush.Target.Name = "library/test" - manifestPush.Target.Tag = "latest" - manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" - - var layerPush0 Event - layerPush0 = prototype - layerPush0.ID = "asdf-asdf-asdf-asdf-1" - layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1" - layerPush0.Target.Type = EventTargetTypeBlob - layerPush0.Target.Name = "library/test" - layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" - - var layerPush1 Event - layerPush1 = prototype - layerPush1.ID = "asdf-asdf-asdf-asdf-2" - layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2" - layerPush1.Target.Type = EventTargetTypeBlob - layerPush1.Target.Name = "library/test" - layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" - - var envelope Envelope - envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) - - p, err := json.MarshalIndent(envelope, "", " ") - if err != nil { - t.Fatalf("unexpected error marshaling envelope: %v", err) - } - if string(p) != expected { - t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) - } -} 
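The deleted test above is a golden-format check: serialize a fixed value, compare byte-for-byte against a frozen literal, and fail loudly if the wire format drifts so the protocol version can be bumped deliberately. It also pins its timestamp by parsing the RFC3339 layout constant against itself, minus the numeric zone suffix, which yields Go's reference time in UTC. The same trick in miniature:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type Record struct {
	ID     string `json:"id"`
	Action string `json:"action"`
}

func main() {
	// The frozen expectation: any change to field names, ordering, or
	// indentation shows up as a mismatch.
	expected := strings.TrimSpace(`
{
   "id": "asdf",
   "action": "push"
}
`)
	p, _ := json.MarshalIndent(Record{ID: "asdf", Action: "push"}, "", "   ")
	if string(p) != expected {
		fmt.Printf("format has changed\n%s\n != \n%s\n", p, expected)
		return
	}
	fmt.Println("format stable")
}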
diff --git a/docs/storage/notifications/http.go b/docs/storage/notifications/http.go deleted file mode 100644 index 15b3574c..00000000 --- a/docs/storage/notifications/http.go +++ /dev/null @@ -1,145 +0,0 @@ -package notifications - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "sync" - "time" -) - -// httpSink implements a single-flight, http notification endpoint. This is -// very lightweight in that it only makes an attempt at an http request. -// Reliability should be provided by the caller. -type httpSink struct { - url string - - mu sync.Mutex - closed bool - client *http.Client - listeners []httpStatusListener - - // TODO(stevvooe): Allow one to configure the media type accepted by this - // sink and choose the serialization based on that. -} - -// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other -// sinks for increased reliability. -func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { - return &httpSink{ - url: u, - listeners: listeners, - client: &http.Client{ - Transport: &headerRoundTripper{ - Transport: http.DefaultTransport.(*http.Transport), - headers: headers, - }, - Timeout: timeout, - }, - } -} - -// httpStatusListener is called on various outcomes of sending notifications. -type httpStatusListener interface { - success(status int, events ...Event) - failure(status int, events ...Event) - err(err error, events ...Event) -} - -// Accept makes an attempt to notify the endpoint, returning an error if it -// fails. It is the caller's responsibility to retry on error. The events are -// accepted or rejected as a group. -func (hs *httpSink) Write(events ...Event) error { - hs.mu.Lock() - defer hs.mu.Unlock() - - if hs.closed { - return ErrSinkClosed - } - - envelope := Envelope{ - Events: events, - } - - // TODO(stevvooe): It is not ideal to keep re-encoding the request body on - // retry but we are going to do it to keep the code simple. It is likely - // we could change the event struct to manage its own buffer. - - p, err := json.MarshalIndent(envelope, "", " ") - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) - } - - body := bytes.NewReader(p) - resp, err := hs.client.Post(hs.url, EventsMediaType, body) - if err != nil { - for _, listener := range hs.listeners { - listener.err(err, events...) - } - - return fmt.Errorf("%v: error posting: %v", hs, err) - } - - // The notifier will treat any 2xx or 3xx response as accepted by the - // endpoint. - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - for _, listener := range hs.listeners { - listener.success(resp.StatusCode, events...) - } - - // TODO(stevvooe): This is a little accepting: we may want to support - // unsupported media type responses with retries using the correct - // media type. There may also be cases that will never work. - - return nil - default: - for _, listener := range hs.listeners { - listener.failure(resp.StatusCode, events...) 
- } - return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) - } -} - -// Close the endpoint -func (hs *httpSink) Close() error { - hs.mu.Lock() - defer hs.mu.Unlock() - - if hs.closed { - return fmt.Errorf("httpsink: already closed") - } - - hs.closed = true - return nil -} - -func (hs *httpSink) String() string { - return fmt.Sprintf("httpSink{%s}", hs.url) -} - -type headerRoundTripper struct { - *http.Transport // must be transport to support CancelRequest - headers http.Header -} - -func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var nreq http.Request - nreq = *req - nreq.Header = make(http.Header) - - merge := func(headers http.Header) { - for k, v := range headers { - nreq.Header[k] = append(nreq.Header[k], v...) - } - } - - merge(req.Header) - merge(hrt.headers) - - return hrt.Transport.RoundTrip(&nreq) -} diff --git a/docs/storage/notifications/http_test.go b/docs/storage/notifications/http_test.go deleted file mode 100644 index c2cfbc02..00000000 --- a/docs/storage/notifications/http_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package notifications - -import ( - "encoding/json" - "fmt" - "mime" - "net/http" - "net/http/httptest" - "reflect" - "strconv" - "testing" -) - -// TestHTTPSink mocks out an http endpoint and notifies it under a couple of -// conditions, ensuring correct behavior. -func TestHTTPSink(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - if r.Method != "POST" { - w.WriteHeader(http.StatusMethodNotAllowed) - t.Fatalf("unexpected request method: %v", r.Method) - return - } - - // Extract the content type and make sure it matches - contentType := r.Header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) - return - } - - if mediaType != EventsMediaType { - w.WriteHeader(http.StatusUnsupportedMediaType) - t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) - return - } - - var envelope Envelope - dec := json.NewDecoder(r.Body) - if err := dec.Decode(&envelope); err != nil { - w.WriteHeader(http.StatusBadRequest) - t.Fatalf("error decoding request body: %v", err) - return - } - - // Let caller choose the status - status, err := strconv.Atoi(r.FormValue("status")) - if err != nil { - t.Logf("error parsing status: %v", err) - - // May just be empty, set status to 200 - status = http.StatusOK - } - - w.WriteHeader(status) - })) - - metrics := newSafeMetrics() - sink := newHTTPSink(server.URL, 0, nil, - &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) - - var expectedMetrics EndpointMetrics - expectedMetrics.Statuses = make(map[string]int) - - for _, tc := range []struct { - events []Event // events to send - url string - failure bool // true if there should be a failure. - statusCode int // if not set, no status code should be incremented. - }{ - { - statusCode: http.StatusOK, - events: []Event{ - createTestEvent("push", "library/test", "manifest")}, - }, - { - statusCode: http.StatusOK, - events: []Event{ - createTestEvent("push", "library/test", "manifest"), - createTestEvent("push", "library/test", "layer"), - createTestEvent("push", "library/test", "layer"), - }, - }, - { - statusCode: http.StatusTemporaryRedirect, - }, - { - statusCode: http.StatusBadRequest, - failure: true, - }, - { - // Case where connection never goes through. 
- url: "http://shoudlntresolve/", - failure: true, - }, - } { - - if tc.failure { - expectedMetrics.Failures += len(tc.events) - } else { - expectedMetrics.Successes += len(tc.events) - } - - if tc.statusCode > 0 { - expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) - } - - url := tc.url - if url == "" { - url = server.URL + "/" - } - // setup endpoint to respond with expected status code. - url += fmt.Sprintf("?status=%v", tc.statusCode) - sink.url = url - - t.Logf("testcase: %v, fail=%v", url, tc.failure) - // Try a simple event emission. - err := sink.Write(tc.events...) - - if !tc.failure { - if err != nil { - t.Fatalf("unexpected error send event: %v", err) - } - } else { - if err == nil { - t.Fatalf("the endpoint should have rejected the request") - } - } - - if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { - t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) - } - } - - if err := sink.Close(); err != nil { - t.Fatalf("unexpected error closing http sink: %v", err) - } - - // double close returns error - if err := sink.Close(); err == nil { - t.Fatalf("second close should have returned error: %v", err) - } - -} - -func createTestEvent(action, repo, typ string) Event { - event := createEvent(action) - - event.Target.Type = typ - event.Target.Name = repo - - return *event -} diff --git a/docs/storage/notifications/listener.go b/docs/storage/notifications/listener.go deleted file mode 100644 index b55fe326..00000000 --- a/docs/storage/notifications/listener.go +++ /dev/null @@ -1,140 +0,0 @@ -package notifications - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -// ManifestListener describes a set of methods for listening to events related to manifests. -type ManifestListener interface { - ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error - ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error -} - -// LayerListener describes a listener that can respond to layer related events. -type LayerListener interface { - LayerPushed(repo distribution.Repository, layer distribution.Layer) error - LayerPulled(repo distribution.Repository, layer distribution.Layer) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - LayerDeleted(repo distribution.Repository, layer distribution.Layer) error -} - -// Listener combines all repository events into a single interface. -type Listener interface { - ManifestListener - LayerListener -} - -type repositoryListener struct { - distribution.Repository - listener Listener -} - -// Listen dispatches events on the repository to the listener. 
-func Listen(repo distribution.Repository, listener Listener) distribution.Repository { - return &repositoryListener{ - Repository: repo, - listener: listener, - } -} - -func (rl *repositoryListener) Manifests() distribution.ManifestService { - return &manifestServiceListener{ - ManifestService: rl.Repository.Manifests(), - parent: rl, - } -} - -func (rl *repositoryListener) Layers() distribution.LayerService { - return &layerServiceListener{ - LayerService: rl.Repository.Layers(), - parent: rl, - } -} - -type manifestServiceListener struct { - distribution.ManifestService - parent *repositoryListener -} - -func (msl *manifestServiceListener) Get(tag string) (*manifest.SignedManifest, error) { - sm, err := msl.ManifestService.Get(tag) - if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository, sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) - } - } - - return sm, err -} - -func (msl *manifestServiceListener) Put(tag string, sm *manifest.SignedManifest) error { - err := msl.ManifestService.Put(tag, sm) - - if err == nil { - if err := msl.parent.listener.ManifestPushed(msl.parent.Repository, sm); err != nil { - logrus.Errorf("error dispatching manifest push to listener: %v", err) - } - } - - return err -} - -type layerServiceListener struct { - distribution.LayerService - parent *repositoryListener -} - -func (lsl *layerServiceListener) Fetch(dgst digest.Digest) (distribution.Layer, error) { - layer, err := lsl.LayerService.Fetch(dgst) - if err == nil { - if err := lsl.parent.listener.LayerPulled(lsl.parent.Repository, layer); err != nil { - logrus.Errorf("error dispatching layer pull to listener: %v", err) - } - } - - return layer, err -} - -func (lsl *layerServiceListener) Upload() (distribution.LayerUpload, error) { - lu, err := lsl.LayerService.Upload() - return lsl.decorateUpload(lu), err -} - -func (lsl *layerServiceListener) Resume(uuid string) (distribution.LayerUpload, error) { - lu, err := lsl.LayerService.Resume(uuid) - return lsl.decorateUpload(lu), err -} - -func (lsl *layerServiceListener) decorateUpload(lu distribution.LayerUpload) distribution.LayerUpload { - return &layerUploadListener{ - LayerUpload: lu, - parent: lsl, - } -} - -type layerUploadListener struct { - distribution.LayerUpload - parent *layerServiceListener -} - -func (lul *layerUploadListener) Finish(dgst digest.Digest) (distribution.Layer, error) { - layer, err := lul.LayerUpload.Finish(dgst) - if err == nil { - if err := lul.parent.parent.listener.LayerPushed(lul.parent.parent.Repository, layer); err != nil { - logrus.Errorf("error dispatching layer push to listener: %v", err) - } - } - - return layer, err -} diff --git a/docs/storage/notifications/listener_test.go b/docs/storage/notifications/listener_test.go deleted file mode 100644 index 0f91a6a3..00000000 --- a/docs/storage/notifications/listener_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package notifications - -import ( - "io" - "reflect" - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/driver/inmemory" - "github.com/docker/distribution/testutil" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -func TestListener(t *testing.T) { - registry := storage.NewRegistryWithDriver(inmemory.New()) - tl := &testListener{ - ops: make(map[string]int), - } - ctx := context.Background() - repository 
:= Listen(registry.Repository(ctx, "foo/bar"), tl) - - // Now take the registry through a number of operations - checkExerciseRepository(t, repository) - - expectedOps := map[string]int{ - "manifest:push": 1, - "manifest:pull": 1, - // "manifest:delete": 0, // deletes not supported for now - "layer:push": 2, - "layer:pull": 2, - // "layer:delete": 0, // deletes not supported for now - } - - if !reflect.DeepEqual(tl.ops, expectedOps) { - t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) - } - -} - -type testListener struct { - ops map[string]int -} - -func (tl *testListener) ManifestPushed(repo distribution.Repository, sm *manifest.SignedManifest) error { - tl.ops["manifest:push"]++ - - return nil -} - -func (tl *testListener) ManifestPulled(repo distribution.Repository, sm *manifest.SignedManifest) error { - tl.ops["manifest:pull"]++ - return nil -} - -func (tl *testListener) ManifestDeleted(repo distribution.Repository, sm *manifest.SignedManifest) error { - tl.ops["manifest:delete"]++ - return nil -} - -func (tl *testListener) LayerPushed(repo distribution.Repository, layer distribution.Layer) error { - tl.ops["layer:push"]++ - return nil -} - -func (tl *testListener) LayerPulled(repo distribution.Repository, layer distribution.Layer) error { - tl.ops["layer:pull"]++ - return nil -} - -func (tl *testListener) LayerDeleted(repo distribution.Repository, layer distribution.Layer) error { - tl.ops["layer:delete"]++ - return nil -} - -// checkExerciseRegistry takes the registry through all of its operations, -// carrying out generic checks. -func checkExerciseRepository(t *testing.T, repository distribution.Repository) { - // TODO(stevvooe): This would be a nice testutil function. Basically, it - // takes the registry through a common set of operations. This could be - // used to make cross-cutting updates by changing internals that affect - // update counts. Basically, it would make writing tests a lot easier. - - tag := "thetag" - m := manifest.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: repository.Name(), - Tag: tag, - } - - layers := repository.Layers() - for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating test layer: %v", err) - } - dgst := digest.Digest(ds) - upload, err := layers.Upload() - if err != nil { - t.Fatalf("error creating layer upload: %v", err) - } - - // Use the resumes, as well! 
- upload, err = layers.Resume(upload.UUID()) - if err != nil { - t.Fatalf("error resuming layer upload: %v", err) - } - - io.Copy(upload, rs) - - if _, err := upload.Finish(dgst); err != nil { - t.Fatalf("unexpected error finishing upload: %v", err) - } - - m.FSLayers = append(m.FSLayers, manifest.FSLayer{ - BlobSum: dgst, - }) - - // Then fetch the layers - if _, err := layers.Fetch(dgst); err != nil { - t.Fatalf("error fetching layer: %v", err) - } - } - - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating key: %v", err) - } - - sm, err := manifest.Sign(&m, pk) - if err != nil { - t.Fatalf("unexpected error signing manifest: %v", err) - } - - manifests := repository.Manifests() - - if err := manifests.Put(tag, sm); err != nil { - t.Fatalf("unexpected error putting the manifest: %v", err) - } - - fetched, err := manifests.Get(tag) - if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) - } - - if fetched.Tag != fetched.Tag { - t.Fatalf("retrieved unexpected manifest: %v", err) - } -} diff --git a/docs/storage/notifications/metrics.go b/docs/storage/notifications/metrics.go deleted file mode 100644 index 2a8ffcbd..00000000 --- a/docs/storage/notifications/metrics.go +++ /dev/null @@ -1,152 +0,0 @@ -package notifications - -import ( - "expvar" - "fmt" - "net/http" - "sync" -) - -// EndpointMetrics track various actions taken by the endpoint, typically by -// number of events. The goal of this to export it via expvar but we may find -// some other future solution to be better. -type EndpointMetrics struct { - Pending int // events pending in queue - Events int // total events incoming - Successes int // total events written successfully - Failures int // total events failed - Errors int // total events errored - Statuses map[string]int // status code histogram, per call event -} - -// safeMetrics guards the metrics implementation with a lock and provides a -// safe update function. -type safeMetrics struct { - EndpointMetrics - sync.Mutex // protects statuses map -} - -// newSafeMetrics returns safeMetrics with map allocated. -func newSafeMetrics() *safeMetrics { - var sm safeMetrics - sm.Statuses = make(map[string]int) - return &sm -} - -// httpStatusListener returns the listener for the http sink that updates the -// relevent counters. -func (sm *safeMetrics) httpStatusListener() httpStatusListener { - return &endpointMetricsHTTPStatusListener{ - safeMetrics: sm, - } -} - -// eventQueueListener returns a listener that maintains queue related counters. -func (sm *safeMetrics) eventQueueListener() eventQueueListener { - return &endpointMetricsEventQueueListener{ - safeMetrics: sm, - } -} - -// endpointMetricsHTTPStatusListener increments counters related to http sinks -// for the relevent events. 
-type endpointMetricsHTTPStatusListener struct { - *safeMetrics -} - -var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} - -func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) - emsl.Successes += len(events) -} - -func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) - emsl.Failures += len(events) -} - -func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { - emsl.safeMetrics.Lock() - defer emsl.safeMetrics.Unlock() - emsl.Errors += len(events) -} - -// endpointMetricsEventQueueListener maintains the incoming events counter and -// the queues pending count. -type endpointMetricsEventQueueListener struct { - *safeMetrics -} - -func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { - eqc.Lock() - defer eqc.Unlock() - eqc.Events += len(events) - eqc.Pending += len(events) -} - -func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { - eqc.Lock() - defer eqc.Unlock() - eqc.Pending -= len(events) -} - -// endpoints is global registry of endpoints used to report metrics to expvar -var endpoints struct { - registered []*Endpoint - mu sync.Mutex -} - -// register places the endpoint into expvar so that stats are tracked. -func register(e *Endpoint) { - endpoints.mu.Lock() - defer endpoints.mu.Unlock() - - endpoints.registered = append(endpoints.registered, e) -} - -func init() { - // NOTE(stevvooe): Setup registry metrics structure to report to expvar. - // Ideally, we do more metrics through logging but we need some nice - // realtime metrics for queue state for now. - - registry := expvar.Get("registry") - - if registry == nil { - registry = expvar.NewMap("registry") - } - - var notifications expvar.Map - notifications.Init() - notifications.Set("endpoints", expvar.Func(func() interface{} { - endpoints.mu.Lock() - defer endpoints.mu.Unlock() - - var names []interface{} - for _, v := range endpoints.registered { - var epjson struct { - Name string `json:"name"` - URL string `json:"url"` - EndpointConfig - - Metrics EndpointMetrics - } - - epjson.Name = v.Name() - epjson.URL = v.URL() - epjson.EndpointConfig = v.EndpointConfig - - v.ReadMetrics(&epjson.Metrics) - - names = append(names, epjson) - } - - return names - })) - - registry.(*expvar.Map).Set("notifications", ¬ifications) -} diff --git a/docs/storage/notifications/sinks.go b/docs/storage/notifications/sinks.go deleted file mode 100644 index 2bf63e2d..00000000 --- a/docs/storage/notifications/sinks.go +++ /dev/null @@ -1,337 +0,0 @@ -package notifications - -import ( - "container/list" - "fmt" - "sync" - "time" - - "github.com/Sirupsen/logrus" -) - -// NOTE(stevvooe): This file contains definitions for several utility sinks. -// Typically, the broadcaster is the only sink that should be required -// externally, but others are suitable for export if the need arises. Albeit, -// the tight integration with endpoint metrics should be removed. - -// Broadcaster sends events to multiple, reliable Sinks. The goal of this -// component is to dispatch events to configured endpoints. Reliability can be -// provided by wrapping incoming sinks. 
-type Broadcaster struct { - sinks []Sink - events chan []Event - closed chan chan struct{} -} - -// NewBroadcaster ... -// Add appends one or more sinks to the list of sinks. The broadcaster -// behavior will be affected by the properties of the sink. Generally, the -// sink should accept all messages and deal with reliability on its own. Use -// of EventQueue and RetryingSink should be used here. -func NewBroadcaster(sinks ...Sink) *Broadcaster { - b := Broadcaster{ - sinks: sinks, - events: make(chan []Event), - closed: make(chan chan struct{}), - } - - // Start the broadcaster - go b.run() - - return &b -} - -// Write accepts a block of events to be dispatched to all sinks. This method -// will never fail and should never block (hopefully!). The caller cedes the -// slice memory to the broadcaster and should not modify it after calling -// write. -func (b *Broadcaster) Write(events ...Event) error { - select { - case b.events <- events: - case <-b.closed: - return ErrSinkClosed - } - return nil -} - -// Close the broadcaster, ensuring that all messages are flushed to the -// underlying sink before returning. -func (b *Broadcaster) Close() error { - logrus.Infof("broadcaster: closing") - select { - case <-b.closed: - // already closed - return fmt.Errorf("broadcaster: already closed") - default: - // do a little chan handoff dance to synchronize closing - closed := make(chan struct{}) - b.closed <- closed - close(b.closed) - <-closed - return nil - } -} - -// run is the main broadcast loop, started when the broadcaster is created. -// Under normal conditions, it waits for events on the event channel. After -// Close is called, this goroutine will exit. -func (b *Broadcaster) run() { - for { - select { - case block := <-b.events: - for _, sink := range b.sinks { - if err := sink.Write(block...); err != nil { - logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) - } - } - case closing := <-b.closed: - - // close all the underlying sinks - for _, sink := range b.sinks { - if err := sink.Close(); err != nil { - logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) - } - } - closing <- struct{}{} - - logrus.Debugf("broadcaster: closed") - return - } - } -} - -// eventQueue accepts all messages into a queue for asynchronous consumption -// by a sink. It is unbounded and thread safe but the sink must be reliable or -// events will be dropped. -type eventQueue struct { - sink Sink - events *list.List - listeners []eventQueueListener - cond *sync.Cond - mu sync.Mutex - closed bool -} - -// eventQueueListener is called when various events happen on the queue. -type eventQueueListener interface { - ingress(events ...Event) - egress(events ...Event) -} - -// newEventQueue returns a queue to the provided sink. If the updater is non- -// nil, it will be called to update pending metrics on ingress and egress. -func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { - eq := eventQueue{ - sink: sink, - events: list.New(), - listeners: listeners, - } - - eq.cond = sync.NewCond(&eq.mu) - go eq.run() - return &eq -} - -// Write accepts the events into the queue, only failing if the queue has -// beend closed. -func (eq *eventQueue) Write(events ...Event) error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return ErrSinkClosed - } - - for _, listener := range eq.listeners { - listener.ingress(events...) 
- } - eq.events.PushBack(events) - eq.cond.Signal() // signal waiters - - return nil -} - -// Close shutsdown the event queue, flushing -func (eq *eventQueue) Close() error { - eq.mu.Lock() - defer eq.mu.Unlock() - - if eq.closed { - return fmt.Errorf("eventqueue: already closed") - } - - // set closed flag - eq.closed = true - eq.cond.Signal() // signal flushes queue - eq.cond.Wait() // wait for signal from last flush - - return eq.sink.Close() -} - -// run is the main goroutine to flush events to the target sink. -func (eq *eventQueue) run() { - for { - block := eq.next() - - if block == nil { - return // nil block means event queue is closed. - } - - if err := eq.sink.Write(block...); err != nil { - logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) - } - - for _, listener := range eq.listeners { - listener.egress(block...) - } - } -} - -// next encompasses the critical section of the run loop. When the queue is -// empty, it will block on the condition. If new data arrives, it will wake -// and return a block. When closed, a nil slice will be returned. -func (eq *eventQueue) next() []Event { - eq.mu.Lock() - defer eq.mu.Unlock() - - for eq.events.Len() < 1 { - if eq.closed { - eq.cond.Broadcast() - return nil - } - - eq.cond.Wait() - } - - front := eq.events.Front() - block := front.Value.([]Event) - eq.events.Remove(front) - - return block -} - -// retryingSink retries the write until success or an ErrSinkClosed is -// returned. Underlying sink must have p > 0 of succeeding or the sink will -// block. Internally, it is a circuit breaker retries to manage reset. -// Concurrent calls to a retrying sink are serialized through the sink, -// meaning that if one is in-flight, another will not proceed. -type retryingSink struct { - mu sync.Mutex - sink Sink - closed bool - - // circuit breaker hueristics - failures struct { - threshold int - recent int - last time.Time - backoff time.Duration // time after which we retry after failure. - } -} - -type retryingSinkListener interface { - active(events ...Event) - retry(events ...Event) -} - -// TODO(stevvooe): We are using circuit break here, which actually doesn't -// make a whole lot of sense for this use case, since we always retry. Move -// this to use bounded exponential backoff. - -// newRetryingSink returns a sink that will retry writes to a sink, backing -// off on failure. Parameters threshold and backoff adjust the behavior of the -// circuit breaker. -func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { - rs := &retryingSink{ - sink: sink, - } - rs.failures.threshold = threshold - rs.failures.backoff = backoff - - return rs -} - -// Write attempts to flush the events to the downstream sink until it succeeds -// or the sink is closed. -func (rs *retryingSink) Write(events ...Event) error { - rs.mu.Lock() - defer rs.mu.Unlock() - -retry: - - if rs.closed { - return ErrSinkClosed - } - - if !rs.proceed() { - logrus.Warnf("%v encountered too many errors, backing off", rs.sink) - rs.wait(rs.failures.backoff) - goto retry - } - - if err := rs.write(events...); err != nil { - if err == ErrSinkClosed { - // terminal! - return err - } - - logrus.Errorf("retryingsink: error writing events: %v, retrying", err) - goto retry - } - - return nil -} - -// Close closes the sink and the underlying sink. 
-func (rs *retryingSink) Close() error { - rs.mu.Lock() - defer rs.mu.Unlock() - - if rs.closed { - return fmt.Errorf("retryingsink: already closed") - } - - rs.closed = true - return rs.sink.Close() -} - -// write provides a helper that dispatches failure and success properly. Used -// by write as the single-flight write call. -func (rs *retryingSink) write(events ...Event) error { - if err := rs.sink.Write(events...); err != nil { - rs.failure() - return err - } - - rs.reset() - return nil -} - -// wait backoff time against the sink, unlocking so others can proceed. Should -// only be called by methods that currently have the mutex. -func (rs *retryingSink) wait(backoff time.Duration) { - rs.mu.Unlock() - defer rs.mu.Lock() - - // backoff here - time.Sleep(backoff) -} - -// reset marks a succesful call. -func (rs *retryingSink) reset() { - rs.failures.recent = 0 - rs.failures.last = time.Time{} -} - -// failure records a failure. -func (rs *retryingSink) failure() { - rs.failures.recent++ - rs.failures.last = time.Now().UTC() -} - -// proceed returns true if the call should proceed based on circuit breaker -// hueristics. -func (rs *retryingSink) proceed() bool { - return rs.failures.recent < rs.failures.threshold || - time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) -} diff --git a/docs/storage/notifications/sinks_test.go b/docs/storage/notifications/sinks_test.go deleted file mode 100644 index 89756a99..00000000 --- a/docs/storage/notifications/sinks_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package notifications - -import ( - "fmt" - "math/rand" - "sync" - "time" - - "github.com/Sirupsen/logrus" - - "testing" -) - -func TestBroadcaster(t *testing.T) { - const nEvents = 1000 - var sinks []Sink - - for i := 0; i < 10; i++ { - sinks = append(sinks, &testSink{}) - } - - b := NewBroadcaster(sinks...) - - var block []Event - var wg sync.WaitGroup - for i := 1; i <= nEvents; i++ { - block = append(block, createTestEvent("push", "library/test", "blob")) - - if i%10 == 0 && i > 0 { - wg.Add(1) - go func(block ...Event) { - if err := b.Write(block...); err != nil { - t.Fatalf("error writing block of length %d: %v", len(block), err) - } - wg.Done() - }(block...) - - block = nil - } - } - - wg.Wait() // Wait until writes complete - checkClose(t, b) - - // Iterate through the sinks and check that they all have the expected length. - for _, sink := range sinks { - ts := sink.(*testSink) - ts.mu.Lock() - defer ts.mu.Unlock() - - if len(ts.events) != nEvents { - t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) - } - - if !ts.closed { - t.Fatalf("sink should have been closed") - } - } - -} - -func TestEventQueue(t *testing.T) { - const nevents = 1000 - var ts testSink - metrics := newSafeMetrics() - eq := newEventQueue( - // delayed sync simulates destination slower than channel comms - &delayedSink{ - Sink: &ts, - delay: time.Millisecond * 1, - }, metrics.eventQueueListener()) - - var wg sync.WaitGroup - var block []Event - for i := 1; i <= nevents; i++ { - block = append(block, createTestEvent("push", "library/test", "blob")) - if i%10 == 0 && i > 0 { - wg.Add(1) - go func(block ...Event) { - if err := eq.Write(block...); err != nil { - t.Fatalf("error writing event block: %v", err) - } - wg.Done() - }(block...) 
-
-			block = nil
-		}
-	}
-
-	wg.Wait()
-	checkClose(t, eq)
-
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-	metrics.Lock()
-	defer metrics.Unlock()
-
-	if len(ts.events) != nevents {
-		t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000)
-	}
-
-	if !ts.closed {
-		t.Fatalf("sink should have been closed")
-	}
-
-	if metrics.Events != nevents {
-		t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents)
-	}
-
-	if metrics.Pending != 0 {
-		t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0)
-	}
-}
-
-func TestRetryingSink(t *testing.T) {
-
-	// Make a sync that fails most of the time, ensuring that all the events
-	// make it through.
-	var ts testSink
-	flaky := &flakySink{
-		rate: 1.0, // start out always failing.
-		Sink: &ts,
-	}
-	s := newRetryingSink(flaky, 3, 10*time.Millisecond)
-
-	var wg sync.WaitGroup
-	var block []Event
-	for i := 1; i <= 100; i++ {
-		block = append(block, createTestEvent("push", "library/test", "blob"))
-
-		// Above 50, set the failure rate lower
-		if i > 50 {
-			s.mu.Lock()
-			flaky.rate = 0.90
-			s.mu.Unlock()
-		}
-
-		if i%10 == 0 && i > 0 {
-			wg.Add(1)
-			go func(block ...Event) {
-				defer wg.Done()
-				if err := s.Write(block...); err != nil {
-					t.Fatalf("error writing event block: %v", err)
-				}
-			}(block...)
-
-			block = nil
-		}
-	}
-
-	wg.Wait()
-	checkClose(t, s)
-
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-
-	if len(ts.events) != 100 {
-		t.Fatalf("events not propagated: %d != %d", len(ts.events), 100)
-	}
-}
-
-type testSink struct {
-	events []Event
-	mu     sync.Mutex
-	closed bool
-}
-
-func (ts *testSink) Write(events ...Event) error {
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-	ts.events = append(ts.events, events...)
-	return nil
-}
-
-func (ts *testSink) Close() error {
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-	ts.closed = true
-
-	logrus.Infof("closing testSink")
-	return nil
-}
-
-type delayedSink struct {
-	Sink
-	delay time.Duration
-}
-
-func (ds *delayedSink) Write(events ...Event) error {
-	time.Sleep(ds.delay)
-	return ds.Sink.Write(events...)
-}
-
-type flakySink struct {
-	Sink
-	rate float64
-}
-
-func (fs *flakySink) Write(events ...Event) error {
-	if rand.Float64() < fs.rate {
-		return fmt.Errorf("error writing %d events", len(events))
-	}
-
-	return fs.Sink.Write(events...)
-}
-
-func checkClose(t *testing.T, sink Sink) {
-	if err := sink.Close(); err != nil {
-		t.Fatalf("unexpected error closing: %v", err)
-	}
-
-	// second close should not crash but should return an error.
-	if err := sink.Close(); err == nil {
-		t.Fatalf("no error on double close")
-	}
-
-	// Write after closed should be an error
-	if err := sink.Write([]Event{}...); err == nil {
-		t.Fatalf("write after closed did not have an error")
-	} else if err != ErrSinkClosed {
-		t.Fatalf("error should be ErrSinkClosed")
-	}
-}

From 3e906311c6faaf8ae46436d5e56c144cf2d72620 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 13 Feb 2015 13:59:50 -0800
Subject: [PATCH 029/501] Add error return to Repository method on Registry

The method (Registry).Repository may now return an error. This is to
allow certain implementations to validate the name or opt not to return
a repository under certain conditions.

In conjunction with this change, error declarations have been moved into
a single file in the distribution package. Several error declarations
that had remained in the storage package have been moved into
distribution, as well. The declarations for Layer and LayerUpload have
also been moved into the main registry file, as a result.
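For illustration, a minimal sketch of how a caller adapts to the new
signature; the wrapper function, context, and "foo/bar" name here are
assumed, while the error types are the ones consolidated by this patch:

    // Sketch only: handle the error now returned by Repository.
    func getRepo(ctx context.Context, registry distribution.Registry) (distribution.Repository, error) {
        repo, err := registry.Repository(ctx, "foo/bar")
        if err != nil {
            switch err.(type) {
            case distribution.ErrRepositoryNameInvalid:
                // the name failed validation; map to v2.ErrorCodeNameInvalid
                // at the API boundary
            case distribution.ErrRepositoryUnknown:
                // map to v2.ErrorCodeNameUnknown at the API boundary
            }
            return nil, err
        }
        return repo, nil
    }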
Signed-off-by: Stephen J Day --- docs/api/v2/names.go | 8 ++-- docs/handlers/app.go | 38 ++++++++++++++---- docs/handlers/images.go | 7 ++-- docs/handlers/tags.go | 4 +- docs/storage/layer_test.go | 18 +++++++-- docs/storage/manifeststore.go | 62 ++---------------------------- docs/storage/manifeststore_test.go | 8 +++- docs/storage/registry.go | 12 +++++- docs/storage/revisionstore.go | 3 +- docs/storage/tagstore.go | 5 ++- 10 files changed, 79 insertions(+), 86 deletions(-) diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go index d05eeb6a..ffac1858 100644 --- a/docs/api/v2/names.go +++ b/docs/api/v2/names.go @@ -6,6 +6,10 @@ import ( "strings" ) +// TODO(stevvooe): Move these definitions back to an exported package. While +// they are used with v2 definitions, their relevance expands beyond. +// "distribution/names" is a candidate package. + const ( // RepositoryNameComponentMinLength is the minimum number of characters in a // single repository name slash-delimited component @@ -37,10 +41,6 @@ var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9 // RepositoryNameComponentRegexp which must completely match the content var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) -// TODO(stevvooe): RepositoryName needs to be limited to some fixed length. -// Looking path prefixes and s3 limitation of 1024, this should likely be -// around 512 bytes. 256 bytes might be more manageable. - // RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 1 to // 5 path components, separated by a forward slash. var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){0,4}` + RepositoryNameComponentRegexp.String()) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 3a9f46a7..2202de4a 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -227,10 +227,30 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return } - // decorate the authorized repository with an event bridge. - context.Repository = notifications.Listen( - app.registry.Repository(context, getName(context)), - app.eventBridge(context, r)) + if app.nameRequired(r) { + repository, err := app.registry.Repository(context, getName(context)) + + if err != nil { + ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) + + switch err := err.(type) { + case distribution.ErrRepositoryUnknown: + context.Errors.Push(v2.ErrorCodeNameUnknown, err) + case distribution.ErrRepositoryNameInvalid: + context.Errors.Push(v2.ErrorCodeNameInvalid, err) + } + + w.WriteHeader(http.StatusBadRequest) + serveJSON(w, context.Errors) + return + } + + // assign and decorate the authorized repository with an event bridge. + context.Repository = notifications.Listen( + repository, + app.eventBridge(context, r)) + } + handler := dispatch(context, r) ssrw := &singleStatusResponseWriter{ResponseWriter: w} @@ -318,9 +338,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont } } else { // Only allow the name not to be set on the base route. - route := mux.CurrentRoute(r) - - if route == nil || route.GetName() != v2.RouteNameBase { + if app.nameRequired(r) { // For this to be properly secured, repo must always be set for a // resource that may make a modification. 
The only condition under // which name is not set and we still allow access is when the @@ -378,6 +396,12 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) } +// nameRequired returns true if the route requires a name. +func (app *App) nameRequired(r *http.Request) bool { + route := mux.CurrentRoute(r) + return route == nil || route.GetName() != v2.RouteNameBase +} + // apiBase implements a simple yes-man for doing overall checks against the // api. This can support auth roundtrips to support docker login. func apiBase(w http.ResponseWriter, r *http.Request) { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 0e58984b..de7b6dd6 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -10,7 +10,6 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) @@ -70,12 +69,12 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. switch err := err.(type) { - case storage.ErrManifestVerification: + case distribution.ErrManifestVerification: for _, verificationError := range err { switch verificationError := verificationError.(type) { case distribution.ErrUnknownLayer: imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer) - case storage.ErrManifestUnverified: + case distribution.ErrManifestUnverified: imh.Errors.Push(v2.ErrorCodeManifestUnverified) default: if verificationError == digest.ErrDigestInvalidFormat { @@ -104,7 +103,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h manifests := imh.Repository.Manifests() if err := manifests.Delete(imh.Tag); err != nil { switch err := err.(type) { - case storage.ErrUnknownManifest: + case distribution.ErrManifestUnknown: imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) w.WriteHeader(http.StatusNotFound) default: diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 0a764693..be84fae5 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -4,8 +4,8 @@ import ( "encoding/json" "net/http" + "github.com/docker/distribution" "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) @@ -38,7 +38,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { tags, err := manifests.Tags() if err != nil { switch err := err.(type) { - case storage.ErrUnknownRepository: + case distribution.ErrRepositoryUnknown: w.WriteHeader(404) th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()}) default: diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go index ec0186db..ea101b53 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/layer_test.go @@ -36,7 +36,11 @@ func TestSimpleLayerUpload(t *testing.T) { imageName := "foo/bar" driver := inmemory.New() registry := NewRegistryWithDriver(driver) - ls := registry.Repository(ctx, imageName).Layers() + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ls := repository.Layers() h := sha256.New() rd := io.TeeReader(randomDataReader, h) @@ -140,7 +144,11 @@ func 
TestSimpleLayerRead(t *testing.T) { imageName := "foo/bar" driver := inmemory.New() registry := NewRegistryWithDriver(driver) - ls := registry.Repository(ctx, imageName).Layers() + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ls := repository.Layers() randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { @@ -245,7 +253,11 @@ func TestLayerUploadZeroLength(t *testing.T) { imageName := "foo/bar" driver := inmemory.New() registry := NewRegistryWithDriver(driver) - ls := registry.Repository(ctx, imageName).Layers() + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ls := repository.Layers() upload, err := ls.Upload() if err != nil { diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 99802905..765b5d05 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -2,69 +2,13 @@ package storage import ( "fmt" - "strings" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) -// ErrUnknownRepository is returned if the named repository is not known by -// the registry. -type ErrUnknownRepository struct { - Name string -} - -func (err ErrUnknownRepository) Error() string { - return fmt.Sprintf("unknown respository name=%s", err.Name) -} - -// ErrUnknownManifest is returned if the manifest is not known by the -// registry. -type ErrUnknownManifest struct { - Name string - Tag string -} - -func (err ErrUnknownManifest) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrUnknownManifestRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrUnknownManifestRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrUnknownManifestRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return fmt.Sprintf("unverified manifest") -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - type manifestStore struct { repository *repository @@ -147,7 +91,7 @@ func (ms *manifestStore) Delete(tag string) error { // registry only tries to store valid content, leaving trust policies of that // content up to consumers. 
func (ms *manifestStore) verifyManifest(tag string, mnfst *manifest.SignedManifest) error { - var errs ErrManifestVerification + var errs distribution.ErrManifestVerification if mnfst.Name != ms.repository.Name() { // TODO(stevvooe): This needs to be an exported error errs = append(errs, fmt.Errorf("repository name does not match manifest name")) @@ -161,10 +105,10 @@ func (ms *manifestStore) verifyManifest(tag string, mnfst *manifest.SignedManife if _, err := manifest.Verify(mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, ErrManifestUnverified{}) + errs = append(errs, distribution.ErrManifestUnverified{}) default: if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, ErrManifestUnverified{}) + errs = append(errs, distribution.ErrManifestUnverified{}) } else { errs = append(errs, err) } diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 1fd02662..d3a55ce5 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -6,6 +6,7 @@ import ( "reflect" "testing" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -20,7 +21,10 @@ func TestManifestStorage(t *testing.T) { tag := "thetag" driver := inmemory.New() registry := NewRegistryWithDriver(driver) - repo := registry.Repository(ctx, name) + repo, err := registry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } ms := repo.Manifests() exists, err := ms.Exists(tag) @@ -34,7 +38,7 @@ func TestManifestStorage(t *testing.T) { if _, err := ms.Get(tag); true { switch err.(type) { - case ErrUnknownManifest: + case distribution.ErrManifestUnknown: break default: t.Fatalf("expected manifest unknown error: %#v", err) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 2983751a..1a402f36 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -2,6 +2,7 @@ package storage import ( "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/v2" storagedriver "github.com/docker/distribution/registry/storage/driver" "golang.org/x/net/context" ) @@ -36,12 +37,19 @@ func NewRegistryWithDriver(driver storagedriver.StorageDriver) distribution.Regi // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, name string) distribution.Repository { +func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { + if err := v2.ValidateRespositoryName(name); err != nil { + return nil, distribution.ErrRepositoryNameInvalid{ + Name: name, + Reason: err, + } + } + return &repository{ ctx: ctx, registry: reg, name: name, - } + }, nil } // repository provides name-scoped access to various services. 
diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go index b3ecd711..e7122f3e 100644 --- a/docs/storage/revisionstore.go +++ b/docs/storage/revisionstore.go @@ -5,6 +5,7 @@ import ( "path" "github.com/Sirupsen/logrus" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" @@ -40,7 +41,7 @@ func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, if exists, err := rs.exists(revision); err != nil { return nil, err } else if !exists { - return nil, ErrUnknownManifestRevision{ + return nil, distribution.ErrUnknownManifestRevision{ Name: rs.Name(), Revision: revision, } diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index 6ae3e5f8..147623a2 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -3,6 +3,7 @@ package storage import ( "path" + "github.com/docker/distribution" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -26,7 +27,7 @@ func (ts *tagStore) tags() ([]string, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return nil, ErrUnknownRepository{Name: ts.name} + return nil, distribution.ErrRepositoryUnknown{Name: ts.name} default: return nil, err } @@ -104,7 +105,7 @@ func (ts *tagStore) resolve(tag string) (digest.Digest, error) { if exists, err := exists(ts.driver, currentPath); err != nil { return "", err } else if !exists { - return "", ErrUnknownManifest{Name: ts.Name(), Tag: tag} + return "", distribution.ErrManifestUnknown{Name: ts.Name(), Tag: tag} } revision, err := ts.blobStore.readlink(currentPath) From 02718ee277575a455d092d439375b7b62c673df9 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Thu, 19 Feb 2015 16:28:32 -0800 Subject: [PATCH 030/501] Add an empty root directory s3 driver specific test --- docs/storage/driver/s3/s3_test.go | 75 ++++++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 11 deletions(-) diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go index fb2003e1..69543bcb 100644 --- a/docs/storage/driver/s3/s3_test.go +++ b/docs/storage/driver/s3/s3_test.go @@ -16,6 +16,8 @@ import ( // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } +type S3DriverConstructor func(rootDirectory string) (*Driver, error) + func init() { accessKey := os.Getenv("AWS_ACCESS_KEY") secretKey := os.Getenv("AWS_SECRET_KEY") @@ -30,7 +32,7 @@ func init() { } defer os.Remove(root) - s3DriverConstructor := func(region aws.Region) (storagedriver.StorageDriver, error) { + s3DriverConstructor := func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) @@ -47,7 +49,7 @@ func init() { } } - v4AuthBool := true + v4AuthBool := false if v4auth != "" { v4AuthBool, err = strconv.ParseBool(v4auth) if err != nil { @@ -59,12 +61,12 @@ func init() { accessKey, secretKey, bucket, - region, + aws.GetRegion(region), encryptBool, secureBool, v4AuthBool, minChunkSize, - root, + rootDirectory, } return New(parameters) @@ -78,14 +80,18 @@ func init() { return "" } - // for _, region := range aws.Regions { - // if region == aws.USGovWest { - // continue - // } + driverConstructor := func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root) + } + + testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) + + // s3Constructor := func() (*Driver, error) { + // return s3DriverConstructor(aws.GetRegion(region)) + // } + + RegisterS3DriverSuite(s3DriverConstructor, skipCheck) - testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(aws.GetRegion(region)) - }, skipCheck) // testsuites.RegisterIPCSuite(driverName, map[string]string{ // "accesskey": accessKey, // "secretkey": secretKey, @@ -95,3 +101,50 @@ func init() { // }, skipCheck) // } } + +func RegisterS3DriverSuite(s3DriverConstructor S3DriverConstructor, skipCheck testsuites.SkipCheck) { + check.Suite(&S3DriverSuite{ + Constructor: s3DriverConstructor, + SkipCheck: skipCheck, + }) +} + +type S3DriverSuite struct { + Constructor S3DriverConstructor + testsuites.SkipCheck +} + +func (suite *S3DriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } +} + +func (suite *S3DriverSuite) TestEmptyRootList(c *check.C) { + validRoot, err := ioutil.TempDir("", "driver-") + c.Assert(err, check.IsNil) + defer os.Remove(validRoot) + + rootedDriver, err := suite.Constructor(validRoot) + c.Assert(err, check.IsNil) + emptyRootDriver, err := suite.Constructor("") + c.Assert(err, check.IsNil) + slashRootDriver, err := suite.Constructor("/") + c.Assert(err, check.IsNil) + + filename := "/test" + contents := []byte("contents") + err = rootedDriver.PutContent(filename, contents) + c.Assert(err, check.IsNil) + defer rootedDriver.Delete(filename) + + keys, err := emptyRootDriver.List("/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } + + keys, err = slashRootDriver.List("/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } +} From 58269e73fc6bbf70ce82bdfae88ffdd58d6e5ff7 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Thu, 19 Feb 2015 16:31:34 -0800 Subject: [PATCH 031/501] Fix S3 driver's list when the root directory is either "" or "/" --- docs/storage/driver/s3/s3.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index eb9f08f4..d240c901 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -587,6 +587,15 @@ func (d *driver) List(path string) 
([]string, error) {
 	if path != "/" && path[len(path)-1] != '/' {
 		path = path + "/"
 	}
+
+	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
+	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
+	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
+	prefix := ""
+	if d.s3Path("") == "" {
+		prefix = "/"
+	}
+
 	listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
 	if err != nil {
 		return nil, err
@@ -597,11 +606,11 @@ func (d *driver) List(path string) ([]string, error) {
 
 	for {
 		for _, key := range listResponse.Contents {
-			files = append(files, strings.Replace(key.Key, d.s3Path(""), "", 1))
+			files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1))
 		}
 
 		for _, commonPrefix := range listResponse.CommonPrefixes {
-			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), "", 1))
+			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
 		}
 
 		if listResponse.IsTruncated {

From b87459b363f7bd0769381632df2083f3080c2b44 Mon Sep 17 00:00:00 2001
From: Donald Huang
Date: Fri, 20 Feb 2015 00:46:24 +0000
Subject: [PATCH 032/501] Rename auth.token.rootCertBundle yml field

Renames the auth.token.rootCertBundle field in the registry config to
rootcertbundle so that the REGISTRY_AUTH_TOKEN_ROOTCERTBUNDLE environment
variable will override it.

See https://github.com/docker/distribution/blob/master/configuration/parser.go#L155

Signed-off-by: Donald Huang
---
 docs/auth/token/accesscontroller.go | 2 +-
 docs/auth/token/token_test.go       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go
index cb23eab6..4547336a 100644
--- a/docs/auth/token/accesscontroller.go
+++ b/docs/auth/token/accesscontroller.go
@@ -146,7 +146,7 @@ type tokenAccessOptions struct {
 func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
 	var opts tokenAccessOptions
 
-	keys := []string{"realm", "issuer", "service", "rootCertBundle"}
+	keys := []string{"realm", "issuer", "service", "rootcertbundle"}
 	vals := make([]string, 0, len(keys))
 	for _, key := range keys {
 		val, ok := options[key].(string)
diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go
index 791eb214..9d84d4ef 100644
--- a/docs/auth/token/token_test.go
+++ b/docs/auth/token/token_test.go
@@ -261,7 +261,7 @@ func TestAccessController(t *testing.T) {
 		"realm":          realm,
 		"issuer":         issuer,
 		"service":        service,
-		"rootCertBundle": rootCertBundleFilename,
+		"rootcertbundle": rootCertBundleFilename,
 	}
 
 	accessController, err := newAccessController(options)

From 606c5c8c5785d758e7984e69cda0eb4c5fd26fcd Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 19 Feb 2015 17:14:25 -0800
Subject: [PATCH 033/501] A missing digest parameter should result in a bad request

Signed-off-by: Stephen J Day
---
 docs/handlers/layerupload.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go
index 63a9e776..3a852043 100644
--- a/docs/handlers/layerupload.go
+++ b/docs/handlers/layerupload.go
@@ -174,7 +174,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *
 
 	if dgstStr == "" {
 		// no digest? return error, but allow retry.
- w.WriteHeader(http.StatusNotFound) + w.WriteHeader(http.StatusBadRequest) luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") return } From 871cf9dd0147fda76dc07c687483ff85b57f2ca0 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Tue, 24 Feb 2015 14:59:01 -0800 Subject: [PATCH 034/501] Path prefix support for running registry somewhere other than root of server Signed-off-by: David Lawrence (github: endophage) --- docs/api/v2/descriptors.go | 5 +++ docs/api/v2/routes.go | 17 +++++++-- docs/api/v2/routes_test.go | 13 ++++++- docs/api/v2/urls.go | 15 ++++++++ docs/api/v2/urls_test.go | 70 ++++++++++++++++++++++++++++++++++++++ docs/handlers/api_test.go | 48 +++++++++++++++++++++++--- docs/handlers/app.go | 2 +- 7 files changed, 161 insertions(+), 9 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 2c6fafd0..e2007a2e 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -1410,13 +1410,18 @@ var errorDescriptors = []ErrorDescriptor{ var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor var idToDescriptors map[string]ErrorDescriptor +var routeDescriptorsMap map[string]RouteDescriptor func init() { errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors)) idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors)) + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) for _, descriptor := range errorDescriptors { errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor } + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } } diff --git a/docs/api/v2/routes.go b/docs/api/v2/routes.go index ef933600..69f9d901 100644 --- a/docs/api/v2/routes.go +++ b/docs/api/v2/routes.go @@ -25,12 +25,23 @@ var allEndpoints = []string{ // methods. This can be used directly by both server implementations and // clients. func Router() *mux.Router { - router := mux.NewRouter(). - StrictSlash(true) + return RouterWithPrefix("") +} + +// RouterWithPrefix builds a gorilla router with a configured prefix +// on all routes. +func RouterWithPrefix(prefix string) *mux.Router { + rootRouter := mux.NewRouter() + router := rootRouter + if prefix != "" { + router = router.PathPrefix(prefix).Subrouter() + } + + router.StrictSlash(true) for _, descriptor := range routeDescriptors { router.Path(descriptor.Path).Name(descriptor.Name) } - return router + return rootRouter } diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index af424616..dfd11082 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -5,6 +5,7 @@ import ( "net/http" "net/http/httptest" "reflect" + "strings" "testing" "github.com/gorilla/mux" @@ -24,8 +25,16 @@ type routeTestCase struct { // // This may go away as the application structure comes together. 
func TestRouter(t *testing.T) { + baseTestRouter(t, "") +} - router := Router() +func TestRouterWithPrefix(t *testing.T) { + baseTestRouter(t, "/prefix/") +} + +func baseTestRouter(t *testing.T, prefix string) { + + router := RouterWithPrefix(prefix) testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { testCase := routeTestCase{ @@ -147,6 +156,8 @@ func TestRouter(t *testing.T) { StatusCode: http.StatusNotFound, }, } { + testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI + // Register the endpoint route := router.GetRoute(testcase.RouteName) if route == nil { diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index 6f2fd6e8..e36afdab 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -3,6 +3,7 @@ package v2 import ( "net/http" "net/url" + "strings" "github.com/docker/distribution/digest" "github.com/gorilla/mux" @@ -64,11 +65,21 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { host = forwardedHost } + basePath := routeDescriptorsMap[RouteNameBase].Path + + requestPath := r.URL.Path + index := strings.Index(requestPath, basePath) + u := &url.URL{ Scheme: scheme, Host: host, } + if index > 0 { + // N.B. index+1 is important because we want to include the trailing / + u.Path = requestPath[0 : index+1] + } + return NewURLBuilder(u) } @@ -171,6 +182,10 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { return nil, err } + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { + routeURL.Path = routeURL.Path[1:] + } + return cr.root.ResolveReference(routeURL), nil } diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index d8001c2a..237d0f61 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -108,6 +108,35 @@ func TestURLBuilder(t *testing.T) { } } +func TestURLBuilderWithPrefix(t *testing.T) { + roots := []string{ + "http://example.com/prefix/", + "https://example.com/prefix/", + "http://localhost:5000/prefix/", + "https://localhost:5443/prefix/", + } + + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := root[0:len(root)-1] + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + type builderFromRequestTestCase struct { request *http.Request base string @@ -153,3 +182,44 @@ func TestBuilderFromRequest(t *testing.T) { } } } + +func TestBuilderFromRequestWithPrefix(t *testing.T) { + u, err := url.Parse("http://example.com/prefix/v2/") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com/prefix/", + }, + } + + for _, tr := range testRequests { + builder := NewURLBuilderFromRequest(tr.request) + + for _, testCase := range makeURLBuilderTestCases(builder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := 
tr.base[0:len(tr.base)-1] + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index a14e93dc..f400f83e 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -12,6 +12,7 @@ import ( "net/url" "os" "reflect" + "strings" "testing" "github.com/docker/distribution/configuration" @@ -57,6 +58,40 @@ func TestCheckAPI(t *testing.T) { } } +func TestURLPrefix(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + config.HTTP.Prefix = "/test/" + + env := newTestEnvWithConfig(t, &config) + + baseURL, err := env.builder.BuildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + parsed, _ := url.Parse(baseURL) + if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { + t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Length": []string{"2"}, + }) + +} + // TestLayerAPI conducts a full of the of the layer api. func TestLayerAPI(t *testing.T) { // TODO(stevvooe): This test code is complete junk but it should cover the @@ -356,16 +391,21 @@ type testEnv struct { } func newTestEnv(t *testing.T) *testEnv { - ctx := context.Background() config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, } - app := NewApp(ctx, config) + return newTestEnvWithConfig(t, &config) +} + +func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { + ctx := context.Background() + + app := NewApp(ctx, *config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := v2.NewURLBuilderFromString(server.URL) + builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) if err != nil { t.Fatalf("error creating url builder: %v", err) @@ -379,7 +419,7 @@ func newTestEnv(t *testing.T) *testEnv { return &testEnv{ pk: pk, ctx: ctx, - config: config, + config: *config, app: app, server: server, builder: builder, diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 2202de4a..199ca180 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -64,7 +64,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App Config: configuration, Context: ctx, InstanceID: uuid.New(), - router: v2.Router(), + router: v2.RouterWithPrefix(configuration.HTTP.Prefix), } app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "app.id")) From 32f5965c0608997c44875ca464ccb11242c78f91 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 26 Feb 2015 16:43:47 -0800 Subject: [PATCH 035/501] Specify and implement Docker-Upload-UUID This changeset adds support for a header to identify docker upload uuids. This id can be used as a key to manage local state for resumable uploads. The goal is to remove the necessity for a client to parse the url to get an upload uuid. The restrictions for clients to use the location header are still strongly in place.
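As an illustration of the intended client-side flow (a sketch only, not part of this change; uploadURL would come from the blob upload URL builder):

    // startUpload begins a blob upload and records the server-assigned
    // upload UUID, so the client can resume later without parsing the
    // Location URL. Uses only net/http.
    func startUpload(uploadURL string) (location, uuid string, err error) {
        resp, err := http.Post(uploadURL, "", nil)
        if err != nil {
            return "", "", err
        }
        defer resp.Body.Close()
        return resp.Header.Get("Location"), resp.Header.Get("Docker-Upload-UUID"), nil
    }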
Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 11 +++++++++ docs/handlers/api_test.go | 43 +++++++++++++++++++++++++++--------- docs/handlers/layerupload.go | 5 +++++ 3 files changed, 48 insertions(+), 11 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 2c6fafd0..14b2ee4c 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -72,6 +72,13 @@ var ( Format: "0", } + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + unauthorizedResponse = ResponseDescriptor{ Description: "The client does not have access to the repository.", StatusCode: http.StatusUnauthorized, @@ -898,6 +905,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "", }, contentLengthZeroHeader, + dockerUploadUUIDHeader, }, }, }, @@ -941,6 +949,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "0-0", Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.", }, + dockerUploadUUIDHeader, }, }, }, @@ -994,6 +1003,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, + dockerUploadUUIDHeader, }, }, }, @@ -1077,6 +1087,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, + dockerUploadUUIDHeader, }, }, }, diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index a14e93dc..45db0a94 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -11,6 +11,7 @@ import ( "net/http/httputil" "net/url" "os" + "path" "reflect" "testing" @@ -97,8 +98,20 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) // ------------------------------------------ - // Start an upload and cancel - uploadURLBase := startPushLayer(t, env.builder, imageName) + // Start an upload, check the status then cancel + uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) + + // A status check should work + resp, err = http.Get(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error getting upload status: %v", err) + } + checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Range": []string{"0-0"}, + "Docker-Upload-UUID": []string{uploadUUID}, + }) req, err := http.NewRequest("DELETE", uploadURLBase, nil) if err != nil { @@ -121,7 +134,7 @@ func TestLayerAPI(t *testing.T) { // ----------------------------------------- // Do layer push with an empty body and different digest - uploadURLBase = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error doing bad layer push: %v", err) @@ -137,7 +150,7 @@ func TestLayerAPI(t *testing.T) { t.Fatalf("unexpected error digesting empty buffer: %v", err) } - uploadURLBase = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) // ----------------------------------------- @@ -150,7 +163,7 
@@ func TestLayerAPI(t *testing.T) { t.Fatalf("unexpected error digesting empty tar: %v", err) } - uploadURLBase = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) // ------------------------------------------ @@ -158,7 +171,7 @@ func TestLayerAPI(t *testing.T) { layerLength, _ := layerFile.Seek(0, os.SEEK_END) layerFile.Seek(0, os.SEEK_SET) - uploadURLBase = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) // ------------------------ @@ -284,7 +297,7 @@ func TestManifestAPI(t *testing.T) { expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst - uploadURLBase := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } @@ -411,7 +424,7 @@ func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { return resp } -func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string { +func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location string, uuid string) { layerUploadURL, err := ub.BuildBlobUploadURL(name) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) @@ -424,12 +437,20 @@ func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string { defer resp.Body.Close() checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name), resp, http.StatusAccepted) + + u, err := url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatalf("error parsing location header: %v", err) + } + + uuid = path.Base(u.Path) checkHeaders(t, resp, http.Header{ - "Location": []string{"*"}, - "Content-Length": []string{"0"}, + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + "Docker-Upload-UUID": []string{uuid}, }) - return resp.Header.Get("Location") + return resp.Header.Get("Location"), uuid } // doPushLayer pushes the layer content returning the url on success returning diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 3a852043..0f0be27f 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -138,6 +138,8 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R luh.Errors.Push(v2.ErrorCodeUnknown, err) return } + + w.Header().Set("Docker-Upload-UUID", luh.Upload.UUID()) w.WriteHeader(http.StatusAccepted) } @@ -155,6 +157,7 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re return } + w.Header().Set("Docker-Upload-UUID", luh.UUID) w.WriteHeader(http.StatusNoContent) } @@ -235,6 +238,7 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http. 
return } + w.Header().Set("Docker-Upload-UUID", luh.UUID) if err := luh.Upload.Cancel(); err != nil { ctxu.GetLogger(luh).Errorf("error encountered canceling upload: %v", err) w.WriteHeader(http.StatusInternalServerError) @@ -277,6 +281,7 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt return err } + w.Header().Set("Docker-Upload-UUID", luh.UUID) w.Header().Set("Location", uploadURL) w.Header().Set("Content-Length", "0") w.Header().Set("Range", fmt.Sprintf("0-%d", luh.State.Offset)) From 3bf768a58851c6e20b9a087ee0db51506158b4f7 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Thu, 26 Feb 2015 08:15:15 -0800 Subject: [PATCH 036/501] Adding test cases to confirm path traversal attempts are mitigated and bad characters in URI return 404 Signed-off-by: David Lawrence (github: endophage) --- docs/api/v2/routes_test.go | 182 ++++++++++++++++++++++++++++--------- 1 file changed, 140 insertions(+), 42 deletions(-) diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index dfd11082..9157e21e 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -2,20 +2,24 @@ package v2 import ( "encoding/json" + "fmt" + "math/rand" "net/http" "net/http/httptest" "reflect" "strings" "testing" + "time" "github.com/gorilla/mux" ) type routeTestCase struct { - RequestURI string - Vars map[string]string - RouteName string - StatusCode int + RequestURI string + ExpectedURI string + Vars map[string]string + RouteName string + StatusCode int } // TestRouter registers a test handler with all the routes and ensures that @@ -25,36 +29,7 @@ type routeTestCase struct { // // This may go away as the application structure comes together. func TestRouter(t *testing.T) { - baseTestRouter(t, "") -} - -func TestRouterWithPrefix(t *testing.T) { - baseTestRouter(t, "/prefix/") -} - -func baseTestRouter(t *testing.T, prefix string) { - - router := RouterWithPrefix(prefix) - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testCase := routeTestCase{ - RequestURI: r.RequestURI, - Vars: mux.Vars(r), - RouteName: mux.CurrentRoute(r).GetName(), - } - - enc := json.NewEncoder(w) - - if err := enc.Encode(testCase); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - }) - - // Startup test server - server := httptest.NewServer(router) - - for _, testcase := range []routeTestCase{ + testCases := []routeTestCase{ { RouteName: RouteNameBase, RequestURI: "/v2/", @@ -150,14 +125,90 @@ func baseTestRouter(t *testing.T, prefix string) { "name": "foo/bar/manifests", }, }, - { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, - }, - } { - testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI + } + checkTestRouter(t, testCases, "", true) + checkTestRouter(t, testCases, "/prefix/", true) +} + +func TestRouterWithPathTraversals(t *testing.T) { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/../bar/baz/tags/list", + ExpectedURI: "/v2/bar/baz/tags/list", + Vars: map[string]string{ + "name": "bar/baz", + }, + }, + } + checkTestRouter(t, testCases, "", false) +} + +func 
TestRouterWithBadCharacters(t *testing.T) { + if testing.Short() { + testCases := []routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/不bar/tags/list", + StatusCode: http.StatusNotFound, + }, + } + checkTestRouter(t, testCases, "", true) + } else { + // in the long version we're going to fuzz the router + // with random UTF8 characters not in the 128 bit ASCII range. + // These are not valid characters for the router and we expect + // 404s on every test. + rand.Seed(time.Now().UTC().UnixNano()) + testCases := make([]routeTestCase, 1000) + for idx := range testCases { + testCases[idx] = routeTestCase{ + RouteName: RouteNameTags, + RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), + StatusCode: http.StatusNotFound, + } + } + checkTestRouter(t, testCases, "", true) + } +} + +func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { + router := RouterWithPrefix(prefix) + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + testCase := routeTestCase{ + RequestURI: r.RequestURI, + Vars: mux.Vars(r), + RouteName: mux.CurrentRoute(r).GetName(), + } + + enc := json.NewEncoder(w) + + if err := enc.Encode(testCase); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + + // Startup test server + server := httptest.NewServer(router) + + for _, testcase := range testCases { + testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI // Register the endpoint route := router.GetRoute(testcase.RouteName) if route == nil { @@ -178,6 +229,10 @@ func baseTestRouter(t *testing.T, prefix string) { // Override default, zero-value testcase.StatusCode = http.StatusOK } + if testcase.ExpectedURI == "" { + // Override default, zero-value + testcase.ExpectedURI = testcase.RequestURI + } if resp.StatusCode != testcase.StatusCode { t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) @@ -197,13 +252,56 @@ func baseTestRouter(t *testing.T, prefix string) { // Needs to be set out of band actualRouteInfo.StatusCode = resp.StatusCode + if actualRouteInfo.RequestURI != testcase.ExpectedURI { + t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) + } + if actualRouteInfo.RouteName != testcase.RouteName { t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) } - if !reflect.DeepEqual(actualRouteInfo, testcase) { + // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want + // that to make the comparison fail. We're otherwise done with the testcase so empty the + // testcase.ExpectedURI + testcase.ExpectedURI = "" + if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } } } + +// -------------- START LICENSED CODE -------------- +// The following code is derivative of https://github.com/google/gofuzz +// gofuzz is licensed under the Apache License, Version 2.0, January 2004, +// a copy of which can be found in the LICENSE file at the root of this +// repository. + +// These functions allow us to generate strings containing only multibyte +// characters that are invalid in our URLs. 
They are used above for fuzzing +// to ensure we always get 404s on these invalid strings +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. +func (r *charRange) choose() rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +func randomString(length int) string { + runes := make([]rune, length) + for i := range runes { + runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() + } + return string(runes) +} + +// -------------- END LICENSED CODE -------------- From ac7af800fb069af6f84305a06e5b66877b4caaff Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 26 Feb 2015 16:06:36 -0800 Subject: [PATCH 037/501] documentation for name validation grammar Signed-off-by: Stephen J Day --- docs/api/v2/names.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go index ffac1858..e889ffe0 100644 --- a/docs/api/v2/names.go +++ b/docs/api/v2/names.go @@ -82,6 +82,16 @@ var ( // registry. This function accepts a superset of what might be accepted by // docker core or docker hub. If the name does not pass validation, an error, // describing the conditions, is returned. +// +// Effectively, the name should comply with the following grammar: +// +// alpha-numeric := /[a-z0-9]+/ +// separator := /[._-]/ +// component := alpha-numeric [separator alpha-numeric]* +// namespace := component ['/' component]* +// +// The result of the production, known as the "namespace", should be limited +// to 255 characters. func ValidateRespositoryName(name string) error { if len(name) > RepositoryNameTotalLengthMax { return ErrRepositoryNameLong From b870e3fdfbb6fa7cbf2209f9460ddd34424a6f8f Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Tue, 3 Mar 2015 14:47:07 -0800 Subject: [PATCH 038/501] wrap buffered writer around filewriter benchmarks added to filewriter_test, demonstrate buffered version is ~5x faster on my hardware. Signed-off-by: David Lawrence (github: endophage) --- docs/storage/filewriter.go | 54 +++++++++++++++++- docs/storage/filewriter_test.go | 98 +++++++++++++++++++++++++++++++++ docs/storage/layerstore.go | 8 +-- docs/storage/layerupload.go | 12 +++- 4 files changed, 163 insertions(+), 9 deletions(-) diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go index cbf03704..5f22142e 100644 --- a/docs/storage/filewriter.go +++ b/docs/storage/filewriter.go @@ -1,6 +1,7 @@ package storage import ( + "bufio" "bytes" "fmt" "io" @@ -9,6 +10,10 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) +const ( + fileWriterBufferSize = 5 << 20 +) + // fileWriter implements a remote file writer backed by a storage driver. type fileWriter struct { driver storagedriver.StorageDriver @@ -22,6 +27,11 @@ type fileWriter struct { err error // terminal error, if set, reader is closed } +type bufferedFileWriter struct { + fileWriter + bw *bufio.Writer +} + // fileWriterInterface makes the desired io compliant interface that the // filewriter should implement. type fileWriterInterface interface { @@ -35,7 +45,7 @@ var _ fileWriterInterface = &fileWriter{} // newFileWriter returns a prepared fileWriter for the driver and path. This // could be considered similar to an "open" call on a regular filesystem. 
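// The buffering introduced by this patch is the standard bufio pattern; in
// isolation it looks roughly like this (a sketch, with w standing in for any
// io.Writer supplied by a storage driver):
//
//	bw := bufio.NewWriterSize(w, fileWriterBufferSize) // batch small writes
//	bw.Write(p)                                        // may stay in memory
//	bw.Flush()                                         // push pending bytes to w
//
// which is why the Close and Seek wrappers below must Flush before
// delegating to the underlying fileWriter.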
-func newFileWriter(driver storagedriver.StorageDriver, path string) (*fileWriter, error) { +func newFileWriter(driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { fw := fileWriter{ driver: driver, path: path, @@ -56,7 +66,42 @@ func newFileWriter(driver storagedriver.StorageDriver, path string) (*fileWriter fw.size = fi.Size() } - return &fw, nil + buffered := bufferedFileWriter{ + fileWriter: fw, + } + buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) + + return &buffered, nil +} + +// wraps the fileWriter.Write method to buffer small writes +func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { + return bfw.bw.Write(p) +} + +// wraps fileWriter.Close to ensure the buffer is flushed +// before we close the writer. +func (bfw *bufferedFileWriter) Close() (err error) { + if err = bfw.Flush(); err != nil { + return err + } + err = bfw.fileWriter.Close() + return err +} + +// wraps fileWriter.Seek to ensure offset is handled +// correctly in respect to pending data in the buffer +func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { + if err := bfw.Flush(); err != nil { + return 0, err + } + return bfw.fileWriter.Seek(offset, whence) +} + +// wraps bufio.Writer.Flush to allow intermediate flushes +// of the bufferedFileWriter +func (bfw *bufferedFileWriter) Flush() error { + return bfw.bw.Flush() } // Write writes the buffer p at the current write offset. @@ -108,6 +153,9 @@ func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { } // Close closes the fileWriter for writing. +// Calling it once is valid and correct and it will +// return a nil error. Calling it subsequent times will +// detect that fw.err has been set and will return the error. func (fw *fileWriter) Close() error { if fw.err != nil { return fw.err @@ -115,7 +163,7 @@ func (fw *fileWriter) Close() error { fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) - return fw.err + return nil } // readFromAt writes to fw from r at the specified offset. If offset is less diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go index 1a38a519..06db31f3 100644 --- a/docs/storage/filewriter_test.go +++ b/docs/storage/filewriter_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/docker/distribution/digest" + storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) @@ -42,6 +43,7 @@ func TestSimpleWrite(t *testing.T) { if err != nil { t.Fatalf("unexpected error writing content: %v", err) } + fw.Flush() if n != len(content) { t.Fatalf("unexpected write length: %d != %d", n, len(content)) @@ -146,3 +148,99 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unable to verify write data") } } + +func TestBufferedFileWriter(t *testing.T) { + writer, err := newFileWriter(inmemory.New(), "/random") + + if err != nil { + t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) + } + + // write one byte and ensure the offset hasn't been incremented. 
+ // offset will only get incremented when the buffer gets flushed + short := []byte{byte(1)} + + writer.Write(short) + + if writer.offset > 0 { + t.Fatalf("WriteStream called prematurely") + } + + // write enough data to cause the buffer to flush and confirm + // the offset has been incremented + long := make([]byte, fileWriterBufferSize) + _, err = rand.Read(long) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + for i := range long { + long[i] = byte(i) + } + writer.Write(long) + writer.Close() + if writer.offset != (fileWriterBufferSize + 1) { + t.Fatalf("WriteStream not called when buffer capacity reached") + } +} + +func BenchmarkFileWriter(b *testing.B) { + b.StopTimer() // not sure how long setup above will take + for i := 0; i < b.N; i++ { + // Start basic fileWriter initialization + fw := fileWriter{ + driver: inmemory.New(), + path: "/random", + } + + if fi, err := fw.driver.Stat(fw.path); err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError: + // ignore, offset is zero + default: + b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) + } + } else { + if fi.IsDir() { + b.Fatalf("Cannot write to a directory") + } + + fw.size = fi.Size() + } + + randomBytes := make([]byte, 1<<20) + _, err := rand.Read(randomBytes) + if err != nil { + b.Fatalf("unexpected error building random data: %v", err) + } + // End basic file writer initialization + + b.StartTimer() + for j := 0; j < 100; j++ { + fw.Write(randomBytes) + } + b.StopTimer() + } +} + +func BenchmarkBufferedFileWriter(b *testing.B) { + b.StopTimer() // not sure how long setup above will take + for i := 0; i < b.N; i++ { + bfw, err := newFileWriter(inmemory.New(), "/random") + + if err != nil { + b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) + } + + randomBytes := make([]byte, 1<<20) + _, err = rand.Read(randomBytes) + if err != nil { + b.Fatalf("unexpected error building random data: %v", err) + } + + b.StartTimer() + for j := 0; j < 100; j++ { + bfw.Write(randomBytes) + } + b.StopTimer() + } +} diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index 153e42a8..f546529e 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -139,10 +139,10 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di } return &layerUploadController{ - layerStore: ls, - uuid: uuid, - startedAt: startedAt, - fileWriter: *fw, + layerStore: ls, + uuid: uuid, + startedAt: startedAt, + bufferedFileWriter: *fw, }, nil } diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go index 369a9bd5..14e42338 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerupload.go @@ -22,7 +22,9 @@ type layerUploadController struct { uuid string startedAt time.Time - fileWriter + // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisy + // LayerUpload Interface + bufferedFileWriter } var _ distribution.LayerUpload = &layerUploadController{} @@ -42,6 +44,12 @@ func (luc *layerUploadController) StartedAt() time.Time { // format :. 
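// (Digests here take the usual algorithm:hex form, e.g.
//
//	sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
//
// the sha256 of empty input, shown for illustration only.)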
func (luc *layerUploadController) Finish(digest digest.Digest) (distribution.Layer, error) { ctxu.GetLogger(luc.layerStore.repository.ctx).Debug("(*layerUploadController).Finish") + + err := luc.bufferedFileWriter.Close() + if err != nil { + return nil, err + } + canonical, err := luc.validateLayer(digest) if err != nil { return nil, err @@ -103,7 +111,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige // then only have to fetch the difference. // Read the file from the backend driver and validate it. - fr, err := newFileReader(luc.fileWriter.driver, luc.path) + fr, err := newFileReader(luc.bufferedFileWriter.driver, luc.path) if err != nil { return "", err } From 0b34048fe36460c96a02b5ea0345f64618ba6172 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 3 Mar 2015 19:29:12 -0800 Subject: [PATCH 039/501] Remove unnecessary close in client Signed-off-by: Stephen J Day --- docs/client/client.go | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/client/client.go b/docs/client/client.go index 8e868c41..36be960d 100644 --- a/docs/client/client.go +++ b/docs/client/client.go @@ -283,7 +283,6 @@ func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { } return -1, &errs default: - response.Body.Close() return -1, &UnexpectedHTTPStatusError{Status: response.Status} } } From a65662c10f18a9f0829585a1d7643d634c28ce0b Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Wed, 4 Mar 2015 20:32:22 +0000 Subject: [PATCH 040/501] Expose Signatures() on Repository Add a SignatureService and expose it via Signatures() on Repository so external integrations wrapping the registry can access signatures. Move signature related code from revisionstore.go to signaturestore.go. Signed-off-by: Andy Goldstein --- docs/storage/registry.go | 6 +++ docs/storage/revisionstore.go | 68 ++---------------------------- docs/storage/signaturestore.go | 75 ++++++++++++++++++++++++++++++++++ 3 files changed, 84 insertions(+), 65 deletions(-) create mode 100644 docs/storage/signaturestore.go diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 1a402f36..8d7ea16e 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -87,3 +87,9 @@ func (repo *repository) Layers() distribution.LayerService { repository: repo, } } + +func (repo *repository) Signatures() distribution.SignatureService { + return &signatureStore{ + repository: repo, + } +} diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go index e7122f3e..ac605360 100644 --- a/docs/storage/revisionstore.go +++ b/docs/storage/revisionstore.go @@ -2,7 +2,6 @@ package storage import ( "encoding/json" - "path" "github.com/Sirupsen/logrus" "github.com/docker/distribution" @@ -53,7 +52,7 @@ func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, } // Fetch the signatures for the manifest - signatures, err := rs.getSignatures(revision) + signatures, err := rs.Signatures().Get(revision) if err != nil { return nil, err } @@ -104,10 +103,8 @@ func (rs *revisionStore) put(sm *manifest.SignedManifest) (digest.Digest, error) return "", err } - for _, signature := range signatures { - if err := rs.putSignature(revision, signature); err != nil { - return "", err - } + if err := rs.Signatures().Put(revision, signatures...); err != nil { + return "", err } return revision, nil @@ -147,62 +144,3 @@ func (rs *revisionStore) delete(revision digest.Digest) error { return rs.driver.Delete(revisionPath) } - -// getSignatures retrieves all of the signature blobs 
for the specified -// manifest revision. -func (rs *revisionStore) getSignatures(revision digest.Digest) ([][]byte, error) { - signaturesPath, err := rs.pm.path(manifestSignaturesPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return nil, err - } - - // Need to append signature digest algorithm to path to get all items. - // Perhaps, this should be in the pathMapper but it feels awkward. This - // can be eliminated by implementing listAll on drivers. - signaturesPath = path.Join(signaturesPath, "sha256") - - signaturePaths, err := rs.driver.List(signaturesPath) - if err != nil { - return nil, err - } - - var signatures [][]byte - for _, sigPath := range signaturePaths { - // Append the link portion - sigPath = path.Join(sigPath, "link") - - // TODO(stevvooe): These fetches should be parallelized for performance. - p, err := rs.blobStore.linked(sigPath) - if err != nil { - return nil, err - } - - signatures = append(signatures, p) - } - - return signatures, nil -} - -// putSignature stores the signature for the provided manifest revision. -func (rs *revisionStore) putSignature(revision digest.Digest, signature []byte) error { - signatureDigest, err := rs.blobStore.put(signature) - if err != nil { - return err - } - - signaturePath, err := rs.pm.path(manifestSignatureLinkPathSpec{ - name: rs.Name(), - revision: revision, - signature: signatureDigest, - }) - - if err != nil { - return err - } - - return rs.blobStore.link(signaturePath, signatureDigest) -} diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go new file mode 100644 index 00000000..abc52ca6 --- /dev/null +++ b/docs/storage/signaturestore.go @@ -0,0 +1,75 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" +) + +type signatureStore struct { + *repository +} + +var _ distribution.SignatureService = &signatureStore{} + +func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { + signaturesPath, err := s.pm.path(manifestSignaturesPathSpec{ + name: s.Name(), + revision: dgst, + }) + + if err != nil { + return nil, err + } + + // Need to append signature digest algorithm to path to get all items. + // Perhaps, this should be in the pathMapper but it feels awkward. This + // can be eliminated by implementing listAll on drivers. + signaturesPath = path.Join(signaturesPath, "sha256") + + signaturePaths, err := s.driver.List(signaturesPath) + if err != nil { + return nil, err + } + + var signatures [][]byte + for _, sigPath := range signaturePaths { + // Append the link portion + sigPath = path.Join(sigPath, "link") + + // TODO(stevvooe): These fetches should be parallelized for performance. 
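// One plausible shape for that parallelization (a sketch only, assuming a
// sync import; error handling is elided and result ordering is ignored):
//
//	var wg sync.WaitGroup
//	sigs := make([][]byte, len(signaturePaths))
//	for i, sigPath := range signaturePaths {
//		wg.Add(1)
//		go func(i int, p string) {
//			defer wg.Done()
//			sigs[i], _ = s.blobStore.linked(path.Join(p, "link"))
//		}(i, sigPath)
//	}
//	wg.Wait()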
+ p, err := s.blobStore.linked(sigPath) + if err != nil { + return nil, err + } + + signatures = append(signatures, p) + } + + return signatures, nil +} + +func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { + for _, signature := range signatures { + signatureDigest, err := s.blobStore.put(signature) + if err != nil { + return err + } + + signaturePath, err := s.pm.path(manifestSignatureLinkPathSpec{ + name: s.Name(), + revision: dgst, + signature: signatureDigest, + }) + + if err != nil { + return err + } + + if err := s.blobStore.link(signaturePath, signatureDigest); err != nil { + return err + } + } + return nil +} From f46a1b73e8d7b26716a5164afd7f9fc756e7fca7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 25 Feb 2015 18:04:28 -0800 Subject: [PATCH 041/501] spec: fetch manifests by tag or digest Manifests are now fetched by a field called "reference", which may be a tag or a digest. When using digests to reference a manifest, the data is immutable. The routes and specification have been updated to allow this. There are a few caveats to this approach: 1. It may be problematic to rely on data format to differentiate between a tag and a digest. Currently, they are disjoint but there may be modifications on either side that break this guarantee. 2. The caching characteristics of returned content are very different for digest versus tag-based references. Digest urls can be cached forever while tag urls cannot. Both of these are minimal caveats that we can live with in the future. Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 33 +++++++++++++++++++++++++++------ docs/api/v2/errors.go | 3 +++ docs/api/v2/routes_test.go | 20 ++++++++++++++------ docs/api/v2/urls.go | 7 ++++--- 4 files changed, 48 insertions(+), 15 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 301fd596..5f091bbc 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -79,6 +79,13 @@ var ( Format: "", } + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + unauthorizedResponse = ResponseDescriptor{ Description: "The client does not have access to the repository.", StatusCode: http.StatusUnauthorized, @@ -454,13 +461,13 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameManifest, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{tag:" + TagNameRegexp.String() + "}", + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update and retrieve manifests.", Methods: []MethodDescriptor{ { Method: "GET", - Description: "Fetch the manifest identified by `name` and `tag`.", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -473,8 +480,11 @@ var routeDescriptors = []RouteDescriptor{ }, Successes: []ResponseDescriptor{ { - Description: "The manifest idenfied by `name` and `tag`. The contents can be used to identify and resolve resources required to run the specified image.", + Description: "The manifest idenfied by `name` and `reference`. 
The contents can be used to identify and resolve resources required to run the specified image.", StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: manifestBody, @@ -483,7 +493,7 @@ var routeDescriptors = []RouteDescriptor{ }, Failures: []ResponseDescriptor{ { - Description: "The name or tag was invalid.", + Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, ErrorCodes: []ErrorCode{ ErrorCodeNameInvalid, @@ -523,7 +533,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Method: "PUT", - Description: "Put the manifest identified by `name` and `tag`.", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -550,6 +560,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "", }, contentLengthZeroHeader, + digestHeader, }, }, }, @@ -628,7 +639,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Method: "DELETE", - Description: "Delete the manifest identified by `name` and `tag`.", + Description: "Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -729,6 +740,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The length of the requested blob content.", Format: "", }, + digestHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", @@ -745,6 +757,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The location where the layer should be accessible.", Format: "", }, + digestHeader, }, }, }, @@ -1193,6 +1206,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "", Description: "Length of the chunk being uploaded, corresponding the length of the request body.", }, + digestHeader, }, }, }, @@ -1312,6 +1326,13 @@ var errorDescriptors = []ErrorDescriptor{ Description: `Generic error returned when the error does not have an API classification.`, }, + { + Code: ErrorCodeUnsupported, + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + }, { Code: ErrorCodeUnauthorized, Value: "UNAUTHORIZED", diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index 4d5d55c7..cbae020e 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -13,6 +13,9 @@ const ( // ErrorCodeUnknown is a catch-all for errors not defined below. ErrorCodeUnknown ErrorCode = iota + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported + // ErrorCodeUnauthorized is returned if a request is not authorized. 
ErrorCodeUnauthorized diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index 9157e21e..afab71fc 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -39,16 +39,24 @@ func TestRouter(t *testing.T) { RouteName: RouteNameManifest, RequestURI: "/v2/foo/manifests/bar", Vars: map[string]string{ - "name": "foo", - "tag": "bar", + "name": "foo", + "reference": "bar", }, }, { RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/tag", Vars: map[string]string{ - "name": "foo/bar", - "tag": "tag", + "name": "foo/bar", + "reference": "tag", + }, + }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "sha256:abcdef01234567890", }, }, { @@ -112,8 +120,8 @@ func TestRouter(t *testing.T) { RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/manifests/tags", Vars: map[string]string{ - "name": "foo/bar/manifests", - "tag": "tags", + "name": "foo/bar/manifests", + "reference": "tags", }, }, { diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index e36afdab..4b42dd16 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -107,11 +107,12 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { return tagsURL.String(), nil } -// BuildManifestURL constructs a url for the manifest identified by name and tag. -func (ub *URLBuilder) BuildManifestURL(name, tag string) (string, error) { +// BuildManifestURL constructs a url for the manifest identified by name and +// reference. The argument reference may be either a tag or digest. +func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { route := ub.cloneRoute(RouteNameManifest) - manifestURL, err := route.URL("name", name, "tag", tag) + manifestURL, err := route.URL("name", name, "reference", reference) if err != nil { return "", err } From 008236cfef2e9eaada4eaf2a99dfd11f902122e9 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 26 Feb 2015 15:47:04 -0800 Subject: [PATCH 042/501] Implement immutable manifest reference support This changeset implements immutable manifest references via the HTTP API. Most of the changes follow from modifications to ManifestService. Once updates were made across the repo to implement these changes, the http handlers were changed accordingly. The new methods on ManifestService will be broken out into a tagging service in a later PR. Unfortunately, due to complexities around managing the manifest tag index in an eventually consistent manner, direct deletes of manifests have been disabled.
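The tag/digest split reduces to a parse attempt on the reference; roughly (a sketch mirroring the dispatcher change in this patch; resolveReference is an illustrative name, not code from the diff):

    // resolveReference treats anything that parses as a digest as an
    // immutable content address, and everything else as a mutable tag.
    func resolveReference(reference string) (tag string, dgst digest.Digest) {
        if d, err := digest.ParseDigest(reference); err == nil {
            return "", d
        }
        return reference, ""
    }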
Signed-off-by: Stephen J Day --- docs/handlers/api_test.go | 68 ++++++++++++++- docs/handlers/app.go | 3 +- docs/handlers/app_test.go | 2 +- docs/handlers/context.go | 4 +- docs/handlers/images.go | 129 +++++++++++++++++++++++++---- docs/handlers/layer.go | 2 + docs/handlers/layerupload.go | 5 ++ docs/storage/manifeststore.go | 75 +++++++---------- docs/storage/manifeststore_test.go | 89 +++++++++++++++----- docs/storage/paths.go | 37 +++++++-- docs/storage/paths_test.go | 8 ++ docs/storage/tagstore.go | 2 +- 12 files changed, 325 insertions(+), 99 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 902cb9a6..4a273b28 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -218,7 +218,8 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "checking head on existing layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{layerDigest.String()}, }) // ---------------- @@ -230,7 +231,8 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "fetching layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ - "Content-Length": []string{fmt.Sprint(layerLength)}, + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{layerDigest.String()}, }) // Verify the body @@ -286,6 +288,9 @@ func TestManifestAPI(t *testing.T) { // -------------------------------- // Attempt to push unsigned manifest with missing layers unsignedManifest := &manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, Name: imageName, Tag: tag, FSLayers: []manifest.FSLayer{ @@ -343,9 +348,33 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("unexpected error signing manifest: %v", err) } + payload, err := signedManifest.Payload() + checkErr(t, err, "getting manifest payload") + + dgst, err := digest.FromBytes(payload) + checkErr(t, err, "digesting manifest") + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name resp, err = http.Get(manifestURL) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) @@ -353,6 +382,9 @@ func TestManifestAPI(t *testing.T) { defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) var fetchedManifest manifest.SignedManifest dec := json.NewDecoder(resp.Body) @@ -364,6 +396,27 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("manifests do not match") } + // --------------- + // Fetch by digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, 
"fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + var fetchedManifestByDigest manifest.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifestByDigest.Raw, signedManifest.Raw) { + t.Fatalf("manifests do not match") + } + // Ensure that the tag is listed. resp, err = http.Get(tagsURL) if err != nil { @@ -534,8 +587,9 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, } checkHeaders(t, resp, http.Header{ - "Location": []string{expectedLayerURL}, - "Content-Length": []string{"0"}, + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{dgst.String()}, }) return resp.Header.Get("Location") @@ -634,3 +688,9 @@ func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { } } } + +func checkErr(t *testing.T, err error, msg string) { + if err != nil { + t.Fatalf("unexpected error %s: %v", msg, err) + } +} diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 199ca180..12837cc8 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -277,9 +277,8 @@ func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "vars.name", - "vars.tag", + "vars.reference", "vars.digest", - "vars.tag", "vars.uuid")) context := &Context{ diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 158f5fc1..ba580b11 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -84,7 +84,7 @@ func TestAppDispatcher(t *testing.T) { endpoint: v2.RouteNameManifest, vars: []string{ "name", "foo/bar", - "tag", "sometag", + "reference", "sometag", }, }, { diff --git a/docs/handlers/context.go b/docs/handlers/context.go index ee02a53a..5496a794 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -45,8 +45,8 @@ func getName(ctx context.Context) (name string) { return ctxu.GetStringValue(ctx, "vars.name") } -func getTag(ctx context.Context) (tag string) { - return ctxu.GetStringValue(ctx, "vars.tag") +func getReference(ctx context.Context) (reference string) { + return ctxu.GetStringValue(ctx, "vars.reference") } var errDigestNotAvailable = fmt.Errorf("digest not available in context") diff --git a/docs/handlers/images.go b/docs/handlers/images.go index de7b6dd6..174bd3d9 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "strings" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" @@ -11,6 +12,7 @@ import ( "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" + "golang.org/x/net/context" ) // imageManifestDispatcher takes the request context and builds the @@ -18,7 +20,14 @@ import ( func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { imageManifestHandler := &imageManifestHandler{ Context: ctx, - Tag: getTag(ctx), + } + reference := getReference(ctx) + dgst, err := digest.ParseDigest(reference) + if err != nil { + // We just have a tag + imageManifestHandler.Tag = reference + } else { + imageManifestHandler.Digest = dgst } return handlers.MethodHandler{ @@ -32,14 +41,26 @@ func imageManifestDispatcher(ctx 
*Context, r *http.Request) http.Handler { type imageManifestHandler struct { *Context - Tag string + // One of tag or digest gets set, depending on what is present in context. + Tag string + Digest digest.Digest } // GetImageManifest fetches the image manifest from the storage backend, if it exists. func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") manifests := imh.Repository.Manifests() - manifest, err := manifests.Get(imh.Tag) + + var ( + sm *manifest.SignedManifest + err error + ) + + if imh.Tag != "" { + sm, err = manifests.GetByTag(imh.Tag) + } else { + sm, err = manifests.Get(imh.Digest) + } if err != nil { imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) @@ -47,9 +68,22 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } + // Get the digest, if we don't already have it. + if imh.Digest == "" { + dgst, err := digestManifest(imh, sm) + if err != nil { + imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + w.WriteHeader(http.StatusBadRequest) + return + } + + imh.Digest = dgst + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(manifest.Raw))) - w.Write(manifest.Raw) + w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw))) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Write(sm.Raw) } // PutImageManifest validates and stores and image in the registry. @@ -65,7 +99,37 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - if err := manifests.Put(imh.Tag, &manifest); err != nil { + dgst, err := digestManifest(imh, &manifest) + if err != nil { + imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + w.WriteHeader(http.StatusBadRequest) + return + } + + // Validate manifest tag or digest matches payload + if imh.Tag != "" { + if manifest.Tag != imh.Tag { + ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag) + imh.Errors.Push(v2.ErrorCodeTagInvalid) + w.WriteHeader(http.StatusBadRequest) + return + } + + imh.Digest = dgst + } else if imh.Digest != "" { + if dgst != imh.Digest { + ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest) + imh.Errors.Push(v2.ErrorCodeDigestInvalid) + w.WriteHeader(http.StatusBadRequest) + return + } + } else { + imh.Errors.Push(v2.ErrorCodeTagInvalid, "no tag or digest specified") + w.WriteHeader(http.StatusBadRequest) + return + } + + if err := manifests.Put(&manifest); err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. switch err := err.(type) { @@ -94,25 +158,54 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } + // Construct a canonical url for the uploaded manifest. + location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String()) + if err != nil { + // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to + // happen. We'll log the error here but proceed as if it worked. Worst + // case, we set an empty location header. + ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) + } + + w.Header().Set("Location", location) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.WriteHeader(http.StatusAccepted) } // DeleteImageManifest removes the image with the given tag from the registry. 
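// With deletes disabled below, clients should expect a 400 response carrying
// the UNSUPPORTED error code; the body would look roughly like this
// (an illustrative serialization of the v2 error envelope):
//
//	{"errors":[{"code":"UNSUPPORTED","message":"The operation is unsupported."}]}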
func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("DeleteImageManifest") - manifests := imh.Repository.Manifests() - if err := manifests.Delete(imh.Tag); err != nil { - switch err := err.(type) { - case distribution.ErrManifestUnknown: - imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) - w.WriteHeader(http.StatusNotFound) - default: - imh.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusBadRequest) + + // TODO(stevvooe): Unfortunately, at this point, manifest deletes are + // unsupported. There are issues with schema version 1 that make removing + // tag index entries a serious problem in eventually consistent storage. + // Once we work out schema version 2, the full deletion system will be + // worked out and we can add support back. + imh.Errors.Push(v2.ErrorCodeUnsupported) + w.WriteHeader(http.StatusBadRequest) +} + +// digestManifest takes a digest of the given manifest. This belongs somewhere +// better but we'll wait for a refactoring cycle to find that real somewhere. +func digestManifest(ctx context.Context, sm *manifest.SignedManifest) (digest.Digest, error) { + p, err := sm.Payload() + if err != nil { + if !strings.Contains(err.Error(), "missing signature key") { + ctxu.GetLogger(ctx).Errorf("error getting manifest payload: %v", err) + return "", err } - return + + // NOTE(stevvooe): There are no signatures but we still have a + // payload. The request will fail later but this is not the + // responsibility of this part of the code. + p = sm.Raw } - w.Header().Set("Content-Length", "0") - w.WriteHeader(http.StatusAccepted) + dgst, err := digest.FromBytes(p) + if err != nil { + ctxu.GetLogger(ctx).Errorf("error digesting manifest: %v", err) + return "", err + } + + return dgst, err } diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index 69c3df7c..913002e0 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -64,6 +64,8 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { } defer layer.Close() + w.Header().Set("Docker-Content-Digest", lh.Digest.String()) + if lh.layerHandler != nil { handler, _ := lh.layerHandler.Resolve(layer) if handler != nil { diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 0f0be27f..b728d0e1 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -193,6 +193,10 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * // TODO(stevvooe): Check the incoming range header here, per the // specification. LayerUpload should be seeked (sought?) to that position. + // TODO(stevvooe): Consider checking the error on this copy. + // Theoretically, problems should be detected during verification but we + // may miss a root cause. + // Read in the final chunk, if any. 
io.Copy(luh.Upload, r.Body) @@ -227,6 +231,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * w.Header().Set("Location", layerURL) w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", layer.Digest().String()) w.WriteHeader(http.StatusCreated) } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 765b5d05..4946785d 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -5,6 +5,7 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) @@ -18,31 +19,17 @@ type manifestStore struct { var _ distribution.ManifestService = &manifestStore{} -// func (ms *manifestStore) Repository() Repository { -// return ms.repository -// } - -func (ms *manifestStore) Tags() ([]string, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") - return ms.tagStore.tags() -} - -func (ms *manifestStore) Exists(tag string) (bool, error) { +func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Exists") - return ms.tagStore.exists(tag) + return ms.revisionStore.exists(dgst) } -func (ms *manifestStore) Get(tag string) (*manifest.SignedManifest, error) { +func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Get") - dgst, err := ms.tagStore.resolve(tag) - if err != nil { - return nil, err - } - return ms.revisionStore.get(dgst) } -func (ms *manifestStore) Put(tag string, manifest *manifest.SignedManifest) error { +func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Put") // TODO(stevvooe): Add check here to see if the revision is already @@ -51,7 +38,7 @@ func (ms *manifestStore) Put(tag string, manifest *manifest.SignedManifest) erro // indicating what happened. // Verify the manifest. - if err := ms.verifyManifest(tag, manifest); err != nil { + if err := ms.verifyManifest(manifest); err != nil { return err } @@ -62,46 +49,46 @@ func (ms *manifestStore) Put(tag string, manifest *manifest.SignedManifest) erro } // Now, tag the manifest - return ms.tagStore.tag(tag, revision) + return ms.tagStore.tag(manifest.Tag, revision) } -// Delete removes all revisions of the given tag. We may want to change these -// semantics in the future, but this will maintain consistency. The underlying -// blobs are left alone. -func (ms *manifestStore) Delete(tag string) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete") +// Delete removes the revision of the specified manfiest. 
+func (ms *manifestStore) Delete(dgst digest.Digest) error { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete - unsupported") + return fmt.Errorf("deletion of manifests not supported") +} - revisions, err := ms.tagStore.revisions(tag) +func (ms *manifestStore) Tags() ([]string, error) { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") + return ms.tagStore.tags() +} + +func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).ExistsByTag") + return ms.tagStore.exists(tag) +} + +func (ms *manifestStore) GetByTag(tag string) (*manifest.SignedManifest, error) { + ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).GetByTag") + dgst, err := ms.tagStore.resolve(tag) if err != nil { - return err + return nil, err } - for _, revision := range revisions { - if err := ms.revisionStore.delete(revision); err != nil { - return err - } - } - - return ms.tagStore.delete(tag) + return ms.revisionStore.get(dgst) } // verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the name and tag match and -// that the signature is valid for the enclosed payload. As a policy, the -// registry only tries to store valid content, leaving trust policies of that -// content up to consumers. -func (ms *manifestStore) verifyManifest(tag string, mnfst *manifest.SignedManifest) error { +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. +func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { var errs distribution.ErrManifestVerification if mnfst.Name != ms.repository.Name() { // TODO(stevvooe): This needs to be an exported error errs = append(errs, fmt.Errorf("repository name does not match manifest name")) } - if mnfst.Tag != tag { - // TODO(stevvooe): This needs to be an exported error. 
- errs = append(errs, fmt.Errorf("tag does not match manifest tag")) - } - if _, err := manifest.Verify(mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index d3a55ce5..dc03dced 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -9,25 +9,47 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "golang.org/x/net/context" ) -func TestManifestStorage(t *testing.T) { +type manifestStoreTestEnv struct { + ctx context.Context + driver driver.StorageDriver + registry distribution.Registry + repository distribution.Repository + name string + tag string +} + +func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() - name := "foo/bar" - tag := "thetag" driver := inmemory.New() registry := NewRegistryWithDriver(driver) + repo, err := registry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - ms := repo.Manifests() - exists, err := ms.Exists(tag) + return &manifestStoreTestEnv{ + ctx: ctx, + driver: driver, + registry: registry, + repository: repo, + name: name, + tag: tag, + } +} + +func TestManifestStorage(t *testing.T) { + env := newManifestStoreTestEnv(t, "foo/bar", "thetag") + ms := env.repository.Manifests() + + exists, err := ms.ExistsByTag(env.tag) if err != nil { t.Fatalf("unexpected error checking manifest existence: %v", err) } @@ -36,7 +58,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest should not exist") } - if _, err := ms.Get(tag); true { + if _, err := ms.GetByTag(env.tag); true { switch err.(type) { case distribution.ErrManifestUnknown: break @@ -49,8 +71,8 @@ func TestManifestStorage(t *testing.T) { Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: name, - Tag: tag, + Name: env.name, + Tag: env.tag, } // Build up some test layers and add them to the manifest, saving the @@ -79,7 +101,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("error signing manifest: %v", err) } - err = ms.Put(tag, sm) + err = ms.Put(sm) if err == nil { t.Fatalf("expected errors putting manifest") } @@ -88,7 +110,7 @@ func TestManifestStorage(t *testing.T) { // Now, upload the layers that were missing! 
 	for dgst, rs := range testLayers {
-		upload, err := repo.Layers().Upload()
+		upload, err := env.repository.Layers().Upload()
 		if err != nil {
 			t.Fatalf("unexpected error creating test upload: %v", err)
 		}
@@ -102,11 +124,11 @@ func TestManifestStorage(t *testing.T) {
 		}
 	}

-	if err = ms.Put(tag, sm); err != nil {
+	if err = ms.Put(sm); err != nil {
 		t.Fatalf("unexpected error putting manifest: %v", err)
 	}

-	exists, err = ms.Exists(tag)
+	exists, err = ms.ExistsByTag(env.tag)
 	if err != nil {
 		t.Fatalf("unexpected error checking manifest existence: %v", err)
 	}
@@ -115,7 +137,7 @@ func TestManifestStorage(t *testing.T) {
 		t.Fatalf("manifest should exist")
 	}

-	fetchedManifest, err := ms.Get(tag)
+	fetchedManifest, err := ms.GetByTag(env.tag)
 	if err != nil {
 		t.Fatalf("unexpected error fetching manifest: %v", err)
 	}
@@ -134,6 +156,31 @@ func TestManifestStorage(t *testing.T) {
 		t.Fatalf("unexpected error extracting payload: %v", err)
 	}

+	// Now that we have a payload, take a moment to check that the manifest is
+	// returned by the payload digest.
+	dgst, err := digest.FromBytes(payload)
+	if err != nil {
+		t.Fatalf("error getting manifest digest: %v", err)
+	}
+
+	exists, err = ms.Exists(dgst)
+	if err != nil {
+		t.Fatalf("error checking manifest existence by digest: %v", err)
+	}
+
+	if !exists {
+		t.Fatalf("manifest %s should exist", dgst)
+	}
+
+	fetchedByDigest, err := ms.Get(dgst)
+	if err != nil {
+		t.Fatalf("unexpected error fetching manifest by digest: %v", err)
+	}
+
+	if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) {
+		t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest)
+	}
+
 	sigs, err := fetchedJWS.Signatures()
 	if err != nil {
 		t.Fatalf("unable to extract signatures: %v", err)
@@ -153,8 +200,8 @@ func TestManifestStorage(t *testing.T) {
 		t.Fatalf("unexpected tags returned: %v", tags)
 	}

-	if tags[0] != tag {
-		t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{tag})
+	if tags[0] != env.tag {
+		t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{env.tag})
 	}

 	// Now, push the same manifest with a different key
@@ -182,11 +229,11 @@ func TestManifestStorage(t *testing.T) {
 		t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1)
 	}

-	if err = ms.Put(tag, sm2); err != nil {
+	if err = ms.Put(sm2); err != nil {
 		t.Fatalf("unexpected error putting manifest: %v", err)
 	}

-	fetched, err := ms.Get(tag)
+	fetched, err := ms.GetByTag(env.tag)
 	if err != nil {
 		t.Fatalf("unexpected error fetching manifest: %v", err)
 	}
@@ -231,7 +278,11 @@ func TestManifestStorage(t *testing.T) {
 		}
 	}

-	if err := ms.Delete(tag); err != nil {
-		t.Fatalf("unexpected error deleting manifest: %v", err)
+	// TODO(stevvooe): Currently, deletes are not supported due to some
+	// complexity around managing tag indexes. We'll add this support back in
+	// when the manifest format has settled. For now, we expect an error for
+	// all deletes.
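+	// For reference, the retrieval half of the reworked API, as exercised
+	// above (an illustrative sketch only):
+	//
+	//	sm, err := ms.GetByTag(env.tag) // tag -> current revision -> manifest
+	//	sm, err = ms.Get(dgst)          // content-addressed fetch by digest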
+	if err := ms.Delete(dgst); err == nil {
+		t.Fatalf("expected an error deleting manifest by digest: %v", err)
 	}
 }
diff --git a/docs/storage/paths.go b/docs/storage/paths.go
index 9380dc65..173e98a8 100644
--- a/docs/storage/paths.go
+++ b/docs/storage/paths.go
@@ -72,11 +72,12 @@ const storagePathVersion = "v2"
 //
 // Tags:
 //
-//	manifestTagsPathSpec:          <root>/v2/repositories/<name>/_manifests/tags/
-//	manifestTagPathSpec:           <root>/v2/repositories/<name>/_manifests/tags/<tag>/
-//	manifestTagCurrentPathSpec:    <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
-//	manifestTagIndexPathSpec:      <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
-//	manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
+//	manifestTagsPathSpec:              <root>/v2/repositories/<name>/_manifests/tags/
+//	manifestTagPathSpec:               <root>/v2/repositories/<name>/_manifests/tags/<tag>/
+//	manifestTagCurrentPathSpec:        <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
+//	manifestTagIndexPathSpec:          <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
+//	manifestTagIndexEntryPathSpec:     <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
+//	manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
 //
 // Layers:
 //
@@ -199,6 +200,17 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 		}

 		return path.Join(root, "index"), nil
+	case manifestTagIndexEntryLinkPathSpec:
+		root, err := pm.path(manifestTagIndexEntryPathSpec{
+			name:     v.name,
+			tag:      v.tag,
+			revision: v.revision,
+		})
+		if err != nil {
+			return "", err
+		}
+
+		return path.Join(root, "link"), nil
 	case manifestTagIndexEntryPathSpec:
 		root, err := pm.path(manifestTagIndexPathSpec{
 			name: v.name,
@@ -213,7 +225,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 			return "", err
 		}

-		return path.Join(root, path.Join(append(components, "link")...)), nil
+		return path.Join(root, path.Join(components...)), nil
 	case layerLinkPathSpec:
 		components, err := digestPathComponents(v.digest, false)
 		if err != nil {
@@ -332,8 +344,7 @@ type manifestTagIndexPathSpec struct {

 func (manifestTagIndexPathSpec) pathSpec() {}

-// manifestTagIndexEntryPathSpec describes the link to a revisions of a
-// manifest with given tag within the index.
+// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
 type manifestTagIndexEntryPathSpec struct {
 	name     string
 	tag      string
@@ -342,6 +353,16 @@ type manifestTagIndexEntryPathSpec struct {

 func (manifestTagIndexEntryPathSpec) pathSpec() {}

+// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
+// manifest with the given tag within the index.
+type manifestTagIndexEntryLinkPathSpec struct {
+	name     string
+	tag      string
+	revision digest.Digest
+}
+
+func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
+
 // layerLink specifies a path for a layer link, which is a file with a blob
 // id. The layer link will contain a content addressable blob id reference
 // into the blob store.
The format of the contents is as follows:
//
//	<algorithm>:<hex digest>
diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go
index 79410e75..7dff6e09 100644
--- a/docs/storage/paths_test.go
+++ b/docs/storage/paths_test.go
@@ -78,6 +78,14 @@ func TestPathMapper(t *testing.T) {
 				tag:      "thetag",
 				revision: "sha256:abcdef0123456789",
 			},
+			expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789",
+		},
+		{
+			spec: manifestTagIndexEntryLinkPathSpec{
+				name:     "foo/bar",
+				tag:      "thetag",
+				revision: "sha256:abcdef0123456789",
+			},
 			expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link",
 		},
 		{
diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go
index 147623a2..616df952 100644
--- a/docs/storage/tagstore.go
+++ b/docs/storage/tagstore.go
@@ -63,7 +63,7 @@ func (ts *tagStore) exists(tag string) (bool, error) {
 // tag tags the digest with the given tag, updating the store to point at
 // the current tag. The digest must point to a manifest.
 func (ts *tagStore) tag(tag string, revision digest.Digest) error {
-	indexEntryPath, err := ts.pm.path(manifestTagIndexEntryPathSpec{
+	indexEntryPath, err := ts.pm.path(manifestTagIndexEntryLinkPathSpec{
 		name:     ts.Name(),
 		tag:      tag,
 		revision: revision,

From 19061f347e12128a1cf5a810833a750826a16110 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 5 Mar 2015 17:23:33 -0800
Subject: [PATCH 043/501] doc: move storage driver readmes into docs

Signed-off-by: Stephen J Day
---
 docs/storage/driver/README.md            | 49 ------------------------
 docs/storage/driver/azure/README.md      | 16 --------
 docs/storage/driver/filesystem/README.md |  8 ----
 docs/storage/driver/inmemory/README.md   | 10 -----
 docs/storage/driver/s3/README.md         | 26 -------------
 5 files changed, 109 deletions(-)
 delete mode 100644 docs/storage/driver/README.md
 delete mode 100644 docs/storage/driver/azure/README.md
 delete mode 100644 docs/storage/driver/filesystem/README.md
 delete mode 100644 docs/storage/driver/inmemory/README.md
 delete mode 100644 docs/storage/driver/s3/README.md

diff --git a/docs/storage/driver/README.md b/docs/storage/driver/README.md
deleted file mode 100644
index b603503e..00000000
--- a/docs/storage/driver/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-Docker-Registry Storage Driver
-==============================
-
-This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers.
-
-Provided Drivers
-================
-
-This storage driver package comes bundled with three default drivers.
-
-1. filesystem: A local storage driver configured to use a directory tree in the local filesystem.
-2. s3: A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
-3. inmemory: A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
-
-Storage Driver API
-==================
-
-The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
-
-Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
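To make the key/value model concrete, here is a minimal sketch against the interface described above, using the bundled inmemory driver. The paths are illustrative only, and error handling is elided to panics:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	// The inmemory driver exists solely for reference and testing.
	d := inmemory.New()

	// Write, read, list, and delete content by key, per the interface above.
	if err := d.PutContent("/example/link", []byte("sha256:deadbeef")); err != nil {
		panic(err)
	}

	content, err := d.GetContent("/example/link")
	if err != nil {
		panic(err)
	}
	fmt.Printf("content: %s\n", content)

	// List returns the child objects of a prefix key.
	children, err := d.List("/example")
	if err != nil {
		panic(err)
	}
	fmt.Println("children:", children)

	if err := d.Delete("/example/link"); err != nil {
		panic(err)
	}
}
```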
-
-Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process.
-
-Driver Selection and Configuration
-==================================
-
-The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
-
-Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storage-\<driver name\>" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
-
-Driver Contribution
-===================
-
-## Writing new storage drivers
-To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable.
-
-### In-process drivers
-Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
-
-### Out-of-process drivers
-As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `storagedriver/filesystem/registry-storage-filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
-
-Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time.
-
-## Testing
-Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively.
-
-## Drivers written in other languages
-Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicking StorageDriverServer in `storagedriver/ipc/server.go`.
As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/chan](https://github.com/ndeloof/jchan) and Javascript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome.
diff --git a/docs/storage/driver/azure/README.md b/docs/storage/driver/azure/README.md
deleted file mode 100644
index f0fd296d..00000000
--- a/docs/storage/driver/azure/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Docker Registry Microsoft Azure Blob Storage Driver
-
-
-An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage.
-
-## Parameters
-
-The following parameters must be used to authenticate and configure the storage driver (case-sensitive):
-
-* `accountname`: Name of the Azure Storage Account.
-* `accountkey`: Primary or Secondary Key for the Storage Account.
-* `container`: Name of the root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api].
-
-
-[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
-[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
\ No newline at end of file
diff --git a/docs/storage/driver/filesystem/README.md b/docs/storage/driver/filesystem/README.md
deleted file mode 100644
index ba3ea564..00000000
--- a/docs/storage/driver/filesystem/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Docker-Registry Filesystem Storage Driver
-=========================================
-
-An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.
-
-## Parameters
-
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to `/tmp/registry/storage`.
diff --git a/docs/storage/driver/inmemory/README.md b/docs/storage/driver/inmemory/README.md
deleted file mode 100644
index 2447e2ca..00000000
--- a/docs/storage/driver/inmemory/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-Docker-Registry In-Memory Storage Driver
-=========================================
-
-An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage.
-
-**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing.
-
-## Parameters
-
-None
diff --git a/docs/storage/driver/s3/README.md b/docs/storage/driver/s3/README.md
deleted file mode 100644
index fb0dd014..00000000
--- a/docs/storage/driver/s3/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-Docker-Registry S3 Storage Driver
-=========================================
-
-An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.
-
-## Parameters
-
-`accesskey`: Your aws access key.
-
-`secretkey`: Your aws secret key.
-
-**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials.
-
-`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
-
-`bucket`: The name of your s3 bucket where you wish to store objects (needs to already be created prior to driver initialization).
-
-`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified).
-
-`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.
-
-`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false)
-
-`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to s3. The default is 10 MB. Keep in mind that the minimum part size for s3 is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to s3.
-
-`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).

From 4e3bf4bad4df87f447678dd675b5f44de0ff8c58 Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Wed, 4 Mar 2015 20:57:14 -0800
Subject: [PATCH 044/501] Update notification event Target fields

Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn)
---
 docs/storage/layerreader.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go
index 2d8e588d..1de98e50 100644
--- a/docs/storage/layerreader.go
+++ b/docs/storage/layerreader.go
@@ -21,6 +21,10 @@ func (lrs *layerReader) Digest() digest.Digest {
 	return lrs.digest
 }

+func (lrs *layerReader) Length() int64 {
+	return lrs.size
+}
+
 func (lrs *layerReader) CreatedAt() time.Time {
 	return lrs.modtime
 }

From 98daae176ab559396c96ee0601a144429219ed69 Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Wed, 4 Mar 2015 16:31:31 -0800
Subject: [PATCH 045/501] Switch to SHA256 as canonical digest

Also support client digests linking to canonical digest.
---
 docs/storage/layerupload.go | 54 ++++++++++++++++++-------------------
 docs/storage/paths.go       |  6 -----
 2 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go
index 14e42338..940f2938 100644
--- a/docs/storage/layerupload.go
+++ b/docs/storage/layerupload.go
@@ -11,7 +11,6 @@ import (
 	ctxu "github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
-	"github.com/docker/docker/pkg/tarsum"
 )

 // layerUploadController is used to control the various aspects of resumable
@@ -61,7 +60,7 @@ func (luc *layerUploadController) Finish(digest digest.Digest) (distribution.Lay
 	}

 	// Link the layer blob into the repository.
-	if err := luc.linkLayer(canonical); err != nil {
+	if err := luc.linkLayer(canonical, digest); err != nil {
 		return nil, err
 	}

@@ -86,23 +85,6 @@ func (luc *layerUploadController) Cancel() error {
 // validateLayer checks the layer data against the digest, returning an error
 // if it does not match. The canonical digest is returned.
 func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest, error) {
-	// First, check the incoming tarsum version of the digest.
-	version, err := tarsum.GetVersionFromTarsum(dgst.String())
-	if err != nil {
-		return "", err
-	}
-
-	// TODO(stevvooe): Should we push this down into the digest type?
- switch version { - case tarsum.Version1: - default: - // version 0 and dev, for now. - return "", distribution.ErrLayerInvalidDigest{ - Digest: dgst, - Reason: distribution.ErrLayerTarSumVersionUnsupported, - } - } - digestVerifier := digest.NewDigestVerifier(dgst) // TODO(stevvooe): Store resumable hash calculations in upload directory @@ -122,7 +104,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige // sink. Instead, its read driven. This might be okay. // Calculate an updated digest with the latest version. - canonical, err := digest.FromTarArchive(tr) + canonical, err := digest.FromReader(tr) if err != nil { return "", err } @@ -195,17 +177,33 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { // linkLayer links a valid, written layer blob into the registry under the // named repository for the upload controller. -func (luc *layerUploadController) linkLayer(digest digest.Digest) error { - layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{ - name: luc.layerStore.repository.Name(), - digest: digest, - }) +func (luc *layerUploadController) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical}, aliases...) - if err != nil { - return err + // Don't make duplicate links. + seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{ + name: luc.layerStore.repository.Name(), + digest: dgst, + }) + + if err != nil { + return err + } + + if err := luc.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { + return err + } } - return luc.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(digest)) + return nil } // removeResources should clean up all resources associated with the upload diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 173e98a8..179e7b78 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -232,12 +232,6 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return "", err } - // For now, only map tarsum paths. - if components[0] != "tarsum" { - // Only tarsum is supported, for now - return "", fmt.Errorf("unsupported content digest: %v", v.digest) - } - layerLinkPathComponents := append(repoPrefix, v.name, "_layers") return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil From 2a786bfc23934590f1ea8e0ab4e230d93e1d2c60 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Wed, 4 Mar 2015 20:26:56 -0800 Subject: [PATCH 046/501] fixing up tests to work with for non-tarsum future Signed-off-by: David Lawrence (github: endophage) --- docs/handlers/api_test.go | 16 ++++++++++++---- docs/storage/layer_test.go | 4 ++-- docs/storage/layerupload.go | 2 +- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 4a273b28..22f2d9ca 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -573,7 +573,9 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges // pushLayer pushes the layer content returning the url on success. 
func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, body) + digester := digest.NewCanonicalDigester() + + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, &digester)) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) } @@ -581,7 +583,13 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - expectedLayerURL, err := ub.BuildBlobURL(name, dgst) + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + sha256Dgst := digester.Digest() + + expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst) if err != nil { t.Fatalf("error building expected layer url: %v", err) } @@ -589,7 +597,7 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, checkHeaders(t, resp, http.Header{ "Location": []string{expectedLayerURL}, "Content-Length": []string{"0"}, - "Docker-Content-Digest": []string{dgst.String()}, + "Docker-Content-Digest": []string{sha256Dgst.String()}, }) return resp.Header.Get("Location") @@ -682,7 +690,7 @@ func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { for _, hv := range resp.Header[k] { if hv != v { - t.Fatalf("header value not matched in response: %q != %q", hv, v) + t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v) } } } diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go index ea101b53..43e028d5 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/layer_test.go @@ -266,12 +266,12 @@ func TestLayerUploadZeroLength(t *testing.T) { io.Copy(upload, bytes.NewReader([]byte{})) - dgst, err := digest.FromTarArchive(bytes.NewReader([]byte{})) + dgst, err := digest.FromReader(bytes.NewReader([]byte{})) if err != nil { t.Fatalf("error getting zero digest: %v", err) } - if dgst != digest.DigestTarSumV1EmptyTar { + if dgst != digest.DigestSha256EmptyTar { // sanity check on zero digest t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar) } diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go index 940f2938..69b547f5 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerupload.go @@ -159,7 +159,7 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { // a zero-length blob into a nonzero-length blob location. To // prevent this horrid thing, we employ the hack of only allowing // to this happen for the zero tarsum. 
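The zero-length special case above relies on the canonical empty digest. A quick sketch of how that constant relates to `digest.FromReader`, mirroring the zero-length test earlier in this patch (illustrative only):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Digest an empty stream; this should match the canonical empty-blob
	// constant that moveLayer uses for its zero-length shortcut.
	dgst, err := digest.FromReader(bytes.NewReader(nil))
	if err != nil {
		panic(err)
	}

	fmt.Println(dgst)                                // sha256:e3b0c442...
	fmt.Println(dgst == digest.DigestSha256EmptyTar) // expected: true
}
```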
- if dgst == digest.DigestTarSumV1EmptyTar { + if dgst == digest.DigestSha256EmptyTar { return luc.driver.PutContent(blobPath, []byte{}) } From ccfadc93aa34a849e681b645c915ebf62b5fb4b4 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Thu, 5 Mar 2015 10:11:37 -0500 Subject: [PATCH 047/501] Remove max repository component length restriction Fixes #241 Signed-off-by: Andy Goldstein --- docs/api/v2/names.go | 37 ++++++------------------------------- docs/api/v2/names_test.go | 9 ++++++++- 2 files changed, 14 insertions(+), 32 deletions(-) diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go index e889ffe0..e4a98861 100644 --- a/docs/api/v2/names.go +++ b/docs/api/v2/names.go @@ -15,35 +15,27 @@ const ( // single repository name slash-delimited component RepositoryNameComponentMinLength = 2 - // RepositoryNameComponentMaxLength is the maximum number of characters in a - // single repository name slash-delimited component - RepositoryNameComponentMaxLength = 30 - // RepositoryNameMinComponents is the minimum number of slash-delimited // components that a repository name must have RepositoryNameMinComponents = 1 - // RepositoryNameMaxComponents is the maximum number of slash-delimited - // components that a repository name must have - RepositoryNameMaxComponents = 5 - // RepositoryNameTotalLengthMax is the maximum total number of characters in // a repository name RepositoryNameTotalLengthMax = 255 ) -// RepositoryNameComponentRegexp restricts registtry path components names to -// start with at least two letters or numbers, with following parts able to -// separated by one period, dash or underscore. +// RepositoryNameComponentRegexp restricts registry path component names to +// start with at least one letter or number, with following parts able to +// be separated by one period, dash or underscore. var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) // RepositoryNameComponentAnchoredRegexp is the version of // RepositoryNameComponentRegexp which must completely match the content var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) -// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 1 to -// 5 path components, separated by a forward slash. -var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){0,4}` + RepositoryNameComponentRegexp.String()) +// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow +// multiple path components, separated by a forward slash. +var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String()) // TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. 
var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) @@ -56,19 +48,10 @@ var ( // RepositoryNameComponentMinLength ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength) - // ErrRepositoryNameComponentLong is returned when a repository name - // contains a component which is longer than - // RepositoryNameComponentMaxLength - ErrRepositoryNameComponentLong = fmt.Errorf("respository name component must be %v characters or less", RepositoryNameComponentMaxLength) - // ErrRepositoryNameMissingComponents is returned when a repository name // contains fewer than RepositoryNameMinComponents components ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents) - // ErrRepositoryNameTooManyComponents is returned when a repository name - // contains more than RepositoryNameMaxComponents components - ErrRepositoryNameTooManyComponents = fmt.Errorf("repository name %v or less components", RepositoryNameMaxComponents) - // ErrRepositoryNameLong is returned when a repository name is longer than // RepositoryNameTotalLengthMax ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) @@ -103,19 +86,11 @@ func ValidateRespositoryName(name string) error { return ErrRepositoryNameMissingComponents } - if len(components) > RepositoryNameMaxComponents { - return ErrRepositoryNameTooManyComponents - } - for _, component := range components { if len(component) < RepositoryNameComponentMinLength { return ErrRepositoryNameComponentShort } - if len(component) > RepositoryNameComponentMaxLength { - return ErrRepositoryNameComponentLong - } - if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { return ErrRepositoryNameComponentInvalid } diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index 69ba5305..de6a168f 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -1,6 +1,7 @@ package v2 import ( + "strings" "testing" ) @@ -23,7 +24,6 @@ func TestRepositoryNameRegexp(t *testing.T) { }, { input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - err: ErrRepositoryNameTooManyComponents, }, { input: "aa/aa/bb/bb/bb", @@ -66,6 +66,13 @@ func TestRepositoryNameRegexp(t *testing.T) { input: "a-/a/a/a", err: ErrRepositoryNameComponentInvalid, }, + { + input: strings.Repeat("a", 255), + }, + { + input: strings.Repeat("a", 256), + err: ErrRepositoryNameLong, + }, } { failf := func(format string, v ...interface{}) { From eccae81c9e9daa992baae805c54913ca0a643664 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Mon, 9 Mar 2015 16:23:27 -0700 Subject: [PATCH 048/501] minor refactor + tests for app.go just to improve test coverage. 
Signed-off-by: David Lawrence (github: endophage) --- docs/handlers/app.go | 68 +++++++++++++++++++++------------------ docs/handlers/app_test.go | 68 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 31 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 12837cc8..4d860cc4 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -304,37 +304,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont var accessRecords []auth.Access if repo != "" { - resource := auth.Resource{ - Type: "repository", - Name: repo, - } - - switch r.Method { - case "GET", "HEAD": - accessRecords = append(accessRecords, - auth.Access{ - Resource: resource, - Action: "pull", - }) - case "POST", "PUT", "PATCH": - accessRecords = append(accessRecords, - auth.Access{ - Resource: resource, - Action: "pull", - }, - auth.Access{ - Resource: resource, - Action: "push", - }) - case "DELETE": - // DELETE access requires full admin rights, which is represented - // as "*". This may not be ideal. - accessRecords = append(accessRecords, - auth.Access{ - Resource: resource, - Action: "*", - }) - } + accessRecords = appendAccessRecords(accessRecords, r.Method, repo) } else { // Only allow the name not to be set on the base route. if app.nameRequired(r) { @@ -411,3 +381,39 @@ func apiBase(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, emptyJSON) } + +// appendAccessRecords checks the method and adds the appropriate Access records to the records list. +func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { + resource := auth.Resource{ + Type: "repository", + Name: repo, + } + + switch method { + case "GET", "HEAD": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }) + case "POST", "PUT", "PATCH": + records = append(records, + auth.Access{ + Resource: resource, + Action: "pull", + }, + auth.Access{ + Resource: resource, + Action: "push", + }) + case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. 
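+	// For illustration only, the records accumulated per method (see
+	// TestAppendAccessRecords below):
+	//
+	//	GET, HEAD        -> pull
+	//	POST, PUT, PATCH -> pull, push
+	//	DELETE           -> *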
+ records = append(records, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return records +} diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index ba580b11..80f92490 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -5,10 +5,12 @@ import ( "net/http" "net/http/httptest" "net/url" + "reflect" "testing" "github.com/docker/distribution/configuration" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -200,3 +202,69 @@ func TestNewApp(t *testing.T) { t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized) } } + +// Test the access record accumulator +func TestAppendAccessRecords(t *testing.T) { + repo := "testRepo" + + expectedResource := auth.Resource{ + Type: "repository", + Name: repo, + } + + expectedPullRecord := auth.Access{ + Resource: expectedResource, + Action: "pull", + } + expectedPushRecord := auth.Access{ + Resource: expectedResource, + Action: "push", + } + expectedAllRecord := auth.Access{ + Resource: expectedResource, + Action: "*", + } + + records := []auth.Access{} + result := appendAccessRecords(records, "GET", repo) + expectedResult := []auth.Access{expectedPullRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "HEAD", repo) + expectedResult = []auth.Access{expectedPullRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "POST", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "PUT", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "PATCH", repo) + expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + + records = []auth.Access{} + result = appendAccessRecords(records, "DELETE", repo) + expectedResult = []auth.Access{expectedAllRecord} + if ok := reflect.DeepEqual(result, expectedResult); !ok { + t.Fatalf("Actual access record differs from expected") + } + +} From 3e658d29a667dee19e4e35e578fd0274a7df221b Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Tue, 10 Mar 2015 14:40:58 -0700 Subject: [PATCH 049/501] digest: Minor refactoring Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/handlers/api_test.go | 5 ++++- docs/storage/filereader_test.go | 6 +++++- docs/storage/filewriter_test.go | 18 +++++++++++++++--- docs/storage/layerupload.go | 5 ++++- 4 files changed, 28 insertions(+), 6 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 22f2d9ca..ab8187c1 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -236,7 +236,10 @@ func 
TestLayerAPI(t *testing.T) { }) // Verify the body - verifier := digest.NewDigestVerifier(layerDigest) + verifier, err := digest.NewDigestVerifier(layerDigest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } io.Copy(verifier, resp.Body) if !verifier.Verified() { diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go index 7c554e8b..8a077603 100644 --- a/docs/storage/filereader_test.go +++ b/docs/storage/filereader_test.go @@ -41,7 +41,11 @@ func TestSimpleRead(t *testing.T) { t.Fatalf("error allocating file reader: %v", err) } - verifier := digest.NewDigestVerifier(dgst) + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + t.Fatalf("error getting digest verifier: %s", err) + } + io.Copy(verifier, fr) if !verifier.Verified() { diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go index 06db31f3..a8ea6241 100644 --- a/docs/storage/filewriter_test.go +++ b/docs/storage/filewriter_test.go @@ -55,7 +55,11 @@ func TestSimpleWrite(t *testing.T) { } defer fr.Close() - verifier := digest.NewDigestVerifier(dgst) + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + io.Copy(verifier, fr) if !verifier.Verified() { @@ -94,7 +98,11 @@ func TestSimpleWrite(t *testing.T) { } defer fr.Close() - verifier = digest.NewDigestVerifier(doubledgst) + verifier, err = digest.NewDigestVerifier(doubledgst) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + io.Copy(verifier, fr) if !verifier.Verified() { @@ -141,7 +149,11 @@ func TestSimpleWrite(t *testing.T) { } defer fr.Close() - verifier = digest.NewDigestVerifier(doubledgst) + verifier, err = digest.NewDigestVerifier(doubledgst) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + io.Copy(verifier, fr) if !verifier.Verified() { diff --git a/docs/storage/layerupload.go b/docs/storage/layerupload.go index 69b547f5..fdb00e93 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerupload.go @@ -85,7 +85,10 @@ func (luc *layerUploadController) Cancel() error { // validateLayer checks the layer data against the digest, returning an error // if it does not match. The canonical digest is returned. func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest, error) { - digestVerifier := digest.NewDigestVerifier(dgst) + digestVerifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return "", err + } // TODO(stevvooe): Store resumable hash calculations in upload directory // in driver. Something like a file at path /resumablehash/ From 952f39edffff1f7508366696745a6a76f9390915 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Tue, 3 Mar 2015 08:57:52 -0800 Subject: [PATCH 050/501] Refactoring cloudfactory layer handler into a more generic storage middleware concept. 
This also breaks the dependency the storage package had on goamz

Signed-off-by: David Lawrence (github: endophage)
---
 docs/handlers/app.go                          | 23 +++--
 docs/handlers/layer.go                        | 17 +---
 docs/storage/delegatelayerhandler.go          | 95 ------------------
 .../middleware/cloudfront/middleware.go}      | 50 +++++-----
 .../driver/middleware/storagemiddleware.go    | 40 ++++++++
 docs/storage/layerhandler.go                  | 51 ----------
 docs/storage/layerreader.go                   | 16 +++-
 docs/storage/layerstore.go                    |  2 +-
 .../{layerupload.go => layerwriter.go}        | 83 ++++++++--------
 9 files changed, 139 insertions(+), 238 deletions(-)
 delete mode 100644 docs/storage/delegatelayerhandler.go
 rename docs/storage/{cloudfrontlayerhandler.go => driver/middleware/cloudfront/middleware.go} (64%)
 create mode 100644 docs/storage/driver/middleware/storagemiddleware.go
 delete mode 100644 docs/storage/layerhandler.go
 rename docs/storage/{layerupload.go => layerwriter.go} (69%)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index 4d860cc4..f3f960cb 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -16,6 +16,7 @@ import (
 	"github.com/docker/distribution/registry/storage"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 	"github.com/docker/distribution/registry/storage/driver/factory"
+	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
 	"github.com/gorilla/mux"
 	"golang.org/x/net/context"
 )
@@ -41,8 +42,6 @@ type App struct {
 		sink   notifications.Sink
 		source notifications.SourceRecord
 	}
-
-	layerHandler storage.LayerHandler // allows dispatch of layer serving to external provider
 }

 // Value intercepts calls context.Context.Value, returning the current app id,
@@ -101,14 +100,22 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App
 		app.accessController = accessController
 	}

-	layerHandlerType := configuration.LayerHandler.Type()
+	for _, mw := range configuration.Middleware {
+		if mw.Inject == "registry" {
+			// registry middleware can directly wrap app.registry, identically to storage middlewares with driver
+			panic(fmt.Sprintf("unable to configure registry middleware (%s): %v", mw.Name, err))
+		} else if mw.Inject == "repository" {
+			// we have to do something more intelligent with repository middleware. It needs to be staged
+			// for later to be wrapped around the repository at request time.
+			panic(fmt.Sprintf("unable to configure repository middleware (%s): %v", mw.Name, err))
+		} else if mw.Inject == "storage" {
+			smw, err := storagemiddleware.GetStorageMiddleware(mw.Name, mw.Options, app.driver)

-	if layerHandlerType != "" {
-		lh, err := storage.GetLayerHandler(layerHandlerType, configuration.LayerHandler.Parameters(), app.driver)
-		if err != nil {
-			panic(fmt.Sprintf("unable to configure layer handler (%s): %v", layerHandlerType, err))
+			if err != nil {
+				panic(fmt.Sprintf("unable to configure storage middleware (%s): %v", mw.Name, err))
+			}
+			app.driver = smw
 		}
-		app.layerHandler = lh
 	}

 	return app
diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go
index 913002e0..9e0e440c 100644
--- a/docs/handlers/layer.go
+++ b/docs/handlers/layer.go
@@ -49,8 +49,8 @@ type layerHandler struct {
 // response.
func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(lh).Debug("GetImageLayer") - layers := lh.Repository.Layers() - layer, err := layers.Fetch(lh.Digest) + layerStore := lh.Repository.Layers() + layerReader, err := layerStore.Fetch(lh.Digest) if err != nil { switch err := err.(type) { @@ -62,17 +62,6 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { } return } - defer layer.Close() - w.Header().Set("Docker-Content-Digest", lh.Digest.String()) - - if lh.layerHandler != nil { - handler, _ := lh.layerHandler.Resolve(layer) - if handler != nil { - handler.ServeHTTP(w, r) - return - } - } - - http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer) + layerReader.ServeHTTP(w, r) } diff --git a/docs/storage/delegatelayerhandler.go b/docs/storage/delegatelayerhandler.go deleted file mode 100644 index 62b08b22..00000000 --- a/docs/storage/delegatelayerhandler.go +++ /dev/null @@ -1,95 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// delegateLayerHandler provides a simple implementation of layerHandler that -// simply issues HTTP Temporary Redirects to the URL provided by the -// storagedriver for a given Layer. -type delegateLayerHandler struct { - storageDriver storagedriver.StorageDriver - pathMapper *pathMapper - duration time.Duration -} - -var _ LayerHandler = &delegateLayerHandler{} - -func newDelegateLayerHandler(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (LayerHandler, error) { - duration := 20 * time.Minute - d, ok := options["duration"] - if ok { - switch d := d.(type) { - case time.Duration: - duration = d - case string: - dur, err := time.ParseDuration(d) - if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) - } - duration = dur - } - } - - return &delegateLayerHandler{storageDriver: storageDriver, pathMapper: defaultPathMapper, duration: duration}, nil -} - -// Resolve returns an http.Handler which can serve the contents of the given -// Layer, or an error if not supported by the storagedriver. -func (lh *delegateLayerHandler) Resolve(layer distribution.Layer) (http.Handler, error) { - // TODO(bbland): This is just a sanity check to ensure that the - // storagedriver supports url generation. It would be nice if we didn't have - // to do this twice for non-GET requests. - layerURL, err := lh.urlFor(layer, map[string]interface{}{"method": "GET"}) - if err != nil { - return nil, err - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - layerURL, err = lh.urlFor(layer, map[string]interface{}{"method": r.Method}) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } - - http.Redirect(w, r, layerURL, http.StatusTemporaryRedirect) - }), nil -} - -// urlFor returns a download URL for the given layer, or the empty string if -// unsupported. -func (lh *delegateLayerHandler) urlFor(layer distribution.Layer, options map[string]interface{}) (string, error) { - // Crack open the layer to get at the layerStore - layerRd, ok := layer.(*layerReader) - if !ok { - // TODO(stevvooe): We probably want to find a better way to get at the - // underlying filesystem path for a given layer. Perhaps, the layer - // handler should have its own layer store but right now, it is not - // request scoped. 
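For context, a sketch of the `URLFor` contract these handlers lean on; the options shown are the ones used in this file, and the helper name is illustrative, not part of the registry:

```go
package main

import (
	"time"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// redirectURL asks the driver for a signed, time-limited URL for path.
// Drivers that cannot generate URLs return an error, in which case callers
// fall back to serving the content directly.
func redirectURL(d storagedriver.StorageDriver, path string) (string, error) {
	return d.URLFor(path, map[string]interface{}{
		"method": "GET",                            // HTTP method the URL must support
		"expiry": time.Now().Add(20 * time.Minute), // signed-URL lifetime
	})
}
```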
- return "", fmt.Errorf("unsupported layer type: cannot resolve blob path: %v", layer) - } - - if options == nil { - options = make(map[string]interface{}) - } - options["expiry"] = time.Now().Add(lh.duration) - - layerURL, err := lh.storageDriver.URLFor(layerRd.path, options) - if err != nil { - return "", err - } - - return layerURL, nil -} - -// init registers the delegate layerHandler backend. -func init() { - RegisterLayerHandler("delegate", LayerHandlerInitFunc(newDelegateLayerHandler)) -} diff --git a/docs/storage/cloudfrontlayerhandler.go b/docs/storage/driver/middleware/cloudfront/middleware.go similarity index 64% rename from docs/storage/cloudfrontlayerhandler.go rename to docs/storage/driver/middleware/cloudfront/middleware.go index 82bc313d..d3c5e44f 100644 --- a/docs/storage/cloudfrontlayerhandler.go +++ b/docs/storage/driver/middleware/cloudfront/middleware.go @@ -1,34 +1,36 @@ -package storage +// Package middleware - cloudfront wrapper for storage libs +// N.B. currently only works with S3, not arbitrary sites +// +package middleware import ( "crypto/x509" "encoding/pem" "fmt" "io/ioutil" - "net/http" "net/url" "time" "github.com/AdRoll/goamz/cloudfront" - "github.com/docker/distribution" storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) -// cloudFrontLayerHandler provides an simple implementation of layerHandler that +// cloudFrontStorageMiddleware provides an simple implementation of layerHandler that // constructs temporary signed CloudFront URLs from the storagedriver layer URL, // then issues HTTP Temporary Redirects to this CloudFront content URL. -type cloudFrontLayerHandler struct { - cloudfront *cloudfront.CloudFront - delegateLayerHandler *delegateLayerHandler - duration time.Duration +type cloudFrontStorageMiddleware struct { + storagedriver.StorageDriver + cloudfront *cloudfront.CloudFront + duration time.Duration } -var _ LayerHandler = &cloudFrontLayerHandler{} +var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} // newCloudFrontLayerHandler constructs and returns a new CloudFront // LayerHandler implementation. // Required options: baseurl, privatekey, keypairid -func newCloudFrontLayerHandler(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (LayerHandler, error) { +func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { base, ok := options["baseurl"] if !ok { return nil, fmt.Errorf("No baseurl provided") @@ -68,12 +70,6 @@ func newCloudFrontLayerHandler(storageDriver storagedriver.StorageDriver, option return nil, err } - lh, err := newDelegateLayerHandler(storageDriver, options) - if err != nil { - return nil, err - } - dlh := lh.(*delegateLayerHandler) - cf := cloudfront.New(baseURL, privateKey, keypairID) duration := 20 * time.Minute @@ -91,33 +87,33 @@ func newCloudFrontLayerHandler(storageDriver storagedriver.StorageDriver, option } } - return &cloudFrontLayerHandler{cloudfront: cf, delegateLayerHandler: dlh, duration: duration}, nil + return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil } // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. 
-func (lh *cloudFrontLayerHandler) Resolve(layer distribution.Layer) (http.Handler, error) {
-	layerURLStr, err := lh.delegateLayerHandler.urlFor(layer, nil)
+func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]interface{}) (string, error) {
+	// TODO(endophage): currently only supports S3
+	options["expiry"] = time.Now().Add(lh.duration)
+
+	layerURLStr, err := lh.StorageDriver.URLFor(path, options)
 	if err != nil {
-		return nil, err
+		return "", err
 	}

 	layerURL, err := url.Parse(layerURLStr)
 	if err != nil {
-		return nil, err
+		return "", err
 	}

 	cfURL, err := lh.cloudfront.CannedSignedURL(layerURL.Path, "", time.Now().Add(lh.duration))
 	if err != nil {
-		return nil, err
+		return "", err
 	}
-
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		http.Redirect(w, r, cfURL, http.StatusTemporaryRedirect)
-	}), nil
+	return cfURL, nil
 }

 // init registers the cloudfront layerHandler backend.
 func init() {
-	RegisterLayerHandler("cloudfront", LayerHandlerInitFunc(newCloudFrontLayerHandler))
+	storagemiddleware.RegisterStorageMiddleware("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware))
 }
diff --git a/docs/storage/driver/middleware/storagemiddleware.go b/docs/storage/driver/middleware/storagemiddleware.go
new file mode 100644
index 00000000..fb633164
--- /dev/null
+++ b/docs/storage/driver/middleware/storagemiddleware.go
@@ -0,0 +1,40 @@
+package storagemiddleware
+
+import (
+	"fmt"
+
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// InitFunc is the type of a StorageMiddleware factory function and is
+// used to register the constructor for different StorageMiddleware backends.
+type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error)
+
+var storageMiddlewares map[string]InitFunc
+
+// RegisterStorageMiddleware is used to register a StorageMiddlewareInitFunc for
+// a StorageMiddleware backend with the given name.
+func RegisterStorageMiddleware(name string, initFunc InitFunc) error {
+	if storageMiddlewares == nil {
+		storageMiddlewares = make(map[string]InitFunc)
+	}
+	if _, exists := storageMiddlewares[name]; exists {
+		return fmt.Errorf("name already registered: %s", name)
+	}
+
+	storageMiddlewares[name] = initFunc
+
+	return nil
+}
+
+// GetStorageMiddleware constructs a StorageMiddleware
+// with the given options using the named backend.
+func GetStorageMiddleware(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) {
+	if storageMiddlewares != nil {
+		if initFunc, exists := storageMiddlewares[name]; exists {
+			return initFunc(storageDriver, options)
+		}
+	}
+
+	return nil, fmt.Errorf("no storage middleware registered with name: %s", name)
+}
diff --git a/docs/storage/layerhandler.go b/docs/storage/layerhandler.go
deleted file mode 100644
index b03bc250..00000000
--- a/docs/storage/layerhandler.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package storage
-
-import (
-	"fmt"
-	"net/http"
-
-	"github.com/docker/distribution"
-	storagedriver "github.com/docker/distribution/registry/storage/driver"
-)
-
-// LayerHandler provides middleware for serving the contents of a Layer.
-type LayerHandler interface {
-	// Resolve returns an http.Handler which can serve the contents of a given
-	// Layer if possible, or nil and an error when unsupported. This may
-	// directly serve the contents of the layer or issue a redirect to another
-	// URL hosting the content.
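The registration pattern introduced above (register an `InitFunc` by name, then wrap the driver at startup) can be exercised as follows. This is a hedged sketch: the "logging" name and the wrapper type are hypothetical, not part of the registry.

```go
package main

import (
	"fmt"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
)

// passthroughDriver embeds the wrapped driver and overrides nothing; a real
// middleware (like cloudfront above) would override methods such as URLFor.
type passthroughDriver struct {
	storagedriver.StorageDriver
}

func init() {
	// Register the constructor under a name, mirroring the cloudfront init.
	storagemiddleware.RegisterStorageMiddleware("logging",
		func(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
			return &passthroughDriver{StorageDriver: sd}, nil
		})
}

func main() {
	base := inmemory.New()

	// Look up the named middleware and wrap the base driver with it.
	wrapped, err := storagemiddleware.GetStorageMiddleware("logging", nil, base)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrapped driver: %T\n", wrapped)
}
```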
- Resolve(layer distribution.Layer) (http.Handler, error) -} - -// LayerHandlerInitFunc is the type of a LayerHandler factory function and is -// used to register the contsructor for different LayerHandler backends. -type LayerHandlerInitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (LayerHandler, error) - -var layerHandlers map[string]LayerHandlerInitFunc - -// RegisterLayerHandler is used to register an LayerHandlerInitFunc for -// a LayerHandler backend with the given name. -func RegisterLayerHandler(name string, initFunc LayerHandlerInitFunc) error { - if layerHandlers == nil { - layerHandlers = make(map[string]LayerHandlerInitFunc) - } - if _, exists := layerHandlers[name]; exists { - return fmt.Errorf("name already registered: %s", name) - } - - layerHandlers[name] = initFunc - - return nil -} - -// GetLayerHandler constructs a LayerHandler -// with the given options using the named backend. -func GetLayerHandler(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (LayerHandler, error) { - if layerHandlers != nil { - if initFunc, exists := layerHandlers[name]; exists { - return initFunc(storageDriver, options) - } - } - - return nil, fmt.Errorf("no layer handler registered with name: %s", name) -} diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index 1de98e50..20050f11 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -1,13 +1,14 @@ package storage import ( + "net/http" "time" "github.com/docker/distribution" "github.com/docker/distribution/digest" ) -// layerReadSeeker implements Layer and provides facilities for reading and +// LayerRead implements Layer and provides facilities for reading and // seeking. type layerReader struct { fileReader @@ -17,6 +18,10 @@ type layerReader struct { var _ distribution.Layer = &layerReader{} +func (lrs *layerReader) Path() string { + return lrs.path +} + func (lrs *layerReader) Digest() digest.Digest { return lrs.digest } @@ -33,3 +38,12 @@ func (lrs *layerReader) CreatedAt() time.Time { func (lrs *layerReader) Close() error { return lrs.closeWithErr(distribution.ErrLayerClosed) } + +func (lrs *layerReader) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Docker-Content-Digest", lrs.digest.String()) + + if url, err := lrs.fileReader.driver.URLFor(lrs.Path(), map[string]interface{}{}); err == nil { + http.Redirect(w, r, url, http.StatusTemporaryRedirect) + } + http.ServeContent(w, r, lrs.Digest().String(), lrs.CreatedAt(), lrs) +} diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index f546529e..05881749 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -138,7 +138,7 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di return nil, err } - return &layerUploadController{ + return &layerWriter{ layerStore: ls, uuid: uuid, startedAt: startedAt, diff --git a/docs/storage/layerupload.go b/docs/storage/layerwriter.go similarity index 69% rename from docs/storage/layerupload.go rename to docs/storage/layerwriter.go index fdb00e93..27bbade1 100644 --- a/docs/storage/layerupload.go +++ b/docs/storage/layerwriter.go @@ -13,9 +13,11 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) -// layerUploadController is used to control the various aspects of resumable +var _ distribution.LayerUpload = &layerWriter{} + +// layerWriter is used to control the various aspects of resumable // layer upload. 
It implements the LayerUpload interface. -type layerUploadController struct { +type layerWriter struct { layerStore *layerStore uuid string @@ -26,65 +28,64 @@ type layerUploadController struct { bufferedFileWriter } -var _ distribution.LayerUpload = &layerUploadController{} +var _ distribution.LayerUpload = &layerWriter{} // UUID returns the identifier for this upload. -func (luc *layerUploadController) UUID() string { - return luc.uuid +func (lw *layerWriter) UUID() string { + return lw.uuid } -func (luc *layerUploadController) StartedAt() time.Time { - return luc.startedAt +func (lw *layerWriter) StartedAt() time.Time { + return lw.startedAt } // Finish marks the upload as completed, returning a valid handle to the // uploaded layer. The final size and checksum are validated against the // contents of the uploaded layer. The checksum should be provided in the // format :. -func (luc *layerUploadController) Finish(digest digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(luc.layerStore.repository.ctx).Debug("(*layerUploadController).Finish") +func (lw *layerWriter) Finish(digest digest.Digest) (distribution.Layer, error) { + ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish") - err := luc.bufferedFileWriter.Close() + if err := lw.bufferedFileWriter.Close(); err != nil { + return nil, err + } + + canonical, err := lw.validateLayer(digest) if err != nil { return nil, err } - canonical, err := luc.validateLayer(digest) - if err != nil { - return nil, err - } - - if err := luc.moveLayer(canonical); err != nil { + if err := lw.moveLayer(canonical); err != nil { // TODO(stevvooe): Cleanup? return nil, err } // Link the layer blob into the repository. - if err := luc.linkLayer(canonical, digest); err != nil { + if err := lw.linkLayer(canonical, digest); err != nil { return nil, err } - if err := luc.removeResources(); err != nil { + if err := lw.removeResources(); err != nil { return nil, err } - return luc.layerStore.Fetch(canonical) + return lw.layerStore.Fetch(canonical) } // Cancel the layer upload process. -func (luc *layerUploadController) Cancel() error { - ctxu.GetLogger(luc.layerStore.repository.ctx).Debug("(*layerUploadController).Cancel") - if err := luc.removeResources(); err != nil { +func (lw *layerWriter) Cancel() error { + ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Cancel") + if err := lw.removeResources(); err != nil { return err } - luc.Close() + lw.Close() return nil } // validateLayer checks the layer data against the digest, returning an error // if it does not match. The canonical digest is returned. -func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Digest, error) { +func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { digestVerifier, err := digest.NewDigestVerifier(dgst) if err != nil { return "", err @@ -96,7 +97,7 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige // then only have to fetch the difference. // Read the file from the backend driver and validate it. - fr, err := newFileReader(luc.bufferedFileWriter.driver, luc.path) + fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) if err != nil { return "", err } @@ -125,8 +126,8 @@ func (luc *layerUploadController) validateLayer(dgst digest.Digest) (digest.Dige // moveLayer moves the data into its final, hash-qualified destination, // identified by dgst. The layer should be validated before commencing the // move. 
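validateLayer, just above, boils down to re-hashing the uploaded bytes and comparing the result with the digest the client claimed. Stripped of the digest package's abstractions, the check is roughly the following standard-library sketch (the tarsum and multi-algorithm cases are omitted, and all names here are illustrative):

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"io"
    	"strings"
    )

    // verify re-reads uploaded content and compares the computed sha256
    // against the digest the client claimed, much as validateLayer does
    // through digest.NewDigestVerifier.
    func verify(content io.Reader, claimed string) (string, error) {
    	h := sha256.New()
    	if _, err := io.Copy(h, content); err != nil {
    		return "", err
    	}
    	canonical := fmt.Sprintf("sha256:%x", h.Sum(nil))
    	if canonical != claimed {
    		return "", fmt.Errorf("content does not match digest: %s != %s", canonical, claimed)
    	}
    	return canonical, nil
    }

    func main() {
    	data := "layer bytes"
    	claimed := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(data)))
    	fmt.Println(verify(strings.NewReader(data), claimed))
    }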
-func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { - blobPath, err := luc.layerStore.repository.registry.pm.path(blobDataPathSpec{ +func (lw *layerWriter) moveLayer(dgst digest.Digest) error { + blobPath, err := lw.layerStore.repository.registry.pm.path(blobDataPathSpec{ digest: dgst, }) @@ -135,7 +136,7 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { } // Check for existence - if _, err := luc.driver.Stat(blobPath); err != nil { + if _, err := lw.driver.Stat(blobPath); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: break // ensure that it doesn't exist. @@ -154,7 +155,7 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { // the size here and write a zero-length file to blobPath if this is the // case. For the most part, this should only ever happen with zero-length // tars. - if _, err := luc.driver.Stat(luc.path); err != nil { + if _, err := lw.driver.Stat(lw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // HACK(stevvooe): This is slightly dangerous: if we verify above, @@ -163,24 +164,24 @@ func (luc *layerUploadController) moveLayer(dgst digest.Digest) error { // prevent this horrid thing, we employ the hack of only allowing // to this happen for the zero tarsum. if dgst == digest.DigestSha256EmptyTar { - return luc.driver.PutContent(blobPath, []byte{}) + return lw.driver.PutContent(blobPath, []byte{}) } // We let this fail during the move below. logrus. - WithField("upload.uuid", luc.UUID()). + WithField("upload.uuid", lw.UUID()). WithField("digest", dgst).Warnf("attempted to move zero-length content with non-zero digest") default: return err // unrelated error } } - return luc.driver.Move(luc.path, blobPath) + return lw.driver.Move(lw.path, blobPath) } // linkLayer links a valid, written layer blob into the registry under the // named repository for the upload controller. -func (luc *layerUploadController) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error { +func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error { dgsts := append([]digest.Digest{canonical}, aliases...) // Don't make duplicate links. @@ -192,8 +193,8 @@ func (luc *layerUploadController) linkLayer(canonical digest.Digest, aliases ... } seenDigests[dgst] = struct{}{} - layerLinkPath, err := luc.layerStore.repository.registry.pm.path(layerLinkPathSpec{ - name: luc.layerStore.repository.Name(), + layerLinkPath, err := lw.layerStore.repository.registry.pm.path(layerLinkPathSpec{ + name: lw.layerStore.repository.Name(), digest: dgst, }) @@ -201,7 +202,7 @@ func (luc *layerUploadController) linkLayer(canonical digest.Digest, aliases ... return err } - if err := luc.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { + if err := lw.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { return err } } @@ -212,10 +213,10 @@ func (luc *layerUploadController) linkLayer(canonical digest.Digest, aliases ... // removeResources should clean up all resources associated with the upload // instance. An error will be returned if the clean up cannot proceed. If the // resources are already not present, no error will be returned. 
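linkLayer is what keeps the blob store content addressable: the bytes live exactly once under their canonical digest, while each repository that references them gets a small link file containing that digest. A sketch of the resulting path shapes; the authoritative mapping is owned by the path mapper, so treat these strings as illustrative only:

    package main

    import (
    	"fmt"
    	"path"
    )

    func main() {
    	const (
    		repo = "foo/bar"
    		alg  = "sha256"
    		hex  = "ab12cd34" // digest hex, elided for brevity
    	)

    	// One canonical copy of the blob data, addressed by digest:
    	blobData := path.Join("/v2", "blobs", alg, hex[:2], hex, "data")

    	// A per-repository link file whose contents are the canonical digest:
    	layerLink := path.Join("/v2", "repositories", repo, "_layers", alg, hex, "link")

    	fmt.Println(blobData)  // /v2/blobs/sha256/ab/ab12cd34/data
    	fmt.Println(layerLink) // /v2/repositories/foo/bar/_layers/sha256/ab12cd34/link
    }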
-func (luc *layerUploadController) removeResources() error { - dataPath, err := luc.layerStore.repository.registry.pm.path(uploadDataPathSpec{ - name: luc.layerStore.repository.Name(), - uuid: luc.uuid, +func (lw *layerWriter) removeResources() error { + dataPath, err := lw.layerStore.repository.registry.pm.path(uploadDataPathSpec{ + name: lw.layerStore.repository.Name(), + uuid: lw.uuid, }) if err != nil { @@ -226,7 +227,7 @@ func (luc *layerUploadController) removeResources() error { // upload related files. dirPath := path.Dir(dataPath) - if err := luc.driver.Delete(dirPath); err != nil { + if err := lw.driver.Delete(dirPath); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: break // already gone! From 30bcc17b85aa745deab58c545f40d8e6f79962d5 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Fri, 6 Mar 2015 10:45:16 -0500 Subject: [PATCH 051/501] Middleware! Convert middleware in the config to be a map of type->[]Middleware Add support for registry & repository middleware. Some naming updates as well. Signed-off-by: Andy Goldstein --- docs/handlers/app.go | 39 ++++++++++++------- docs/middleware/registry/middleware.go | 39 +++++++++++++++++++ docs/middleware/repository/middleware.go | 39 +++++++++++++++++++ .../middleware/cloudfront/middleware.go | 2 +- .../driver/middleware/storagemiddleware.go | 9 ++--- 5 files changed, 107 insertions(+), 21 deletions(-) create mode 100644 docs/middleware/registry/middleware.go create mode 100644 docs/middleware/repository/middleware.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f3f960cb..8cd7c739 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -13,6 +13,8 @@ import ( "github.com/docker/distribution/notifications" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/distribution/registry/storage" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" @@ -89,7 +91,16 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App } app.configureEvents(&configuration) + app.registry = storage.NewRegistryWithDriver(app.driver) + for _, mw := range configuration.Middleware["registry"] { + rmw, err := registrymiddleware.Get(mw.Name, mw.Options, app.registry) + if err != nil { + panic(fmt.Sprintf("unable to configure registry middleware (%s): %s", mw.Name, err)) + } + app.registry = rmw + } + authType := configuration.Auth.Type() if authType != "" { @@ -100,22 +111,12 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.accessController = accessController } - for _, mw := range configuration.Middleware { - if mw.Inject == "registry" { - // registry middleware can director wrap app.registry identically to storage middlewares with driver - panic(fmt.Sprintf("unable to configure registry middleware (%s): %v", mw.Name, err)) - } else if mw.Inject == "repository" { - // we have to do something more intelligent with repository middleware, It needs to be staged - // for later to be wrapped around the repository at request time. 
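For reference, the middleware configuration that replaces the Inject field above is a map from injection point ("registry", "repository", "storage") to an ordered list of named middlewares, each carrying an options bag. Expressed as a self-contained Go literal (the struct here is a local stand-in for configuration.Middleware, and the option values are hypothetical, though baseurl, privatekey and keypairid are the cloudfront middleware's documented required options):

    package main

    import "fmt"

    // middlewareConfig mirrors the shape consumed by NewApp: Name selects
    // the registered backend, Options is handed to its InitFunc.
    type middlewareConfig struct {
    	Name    string
    	Options map[string]interface{}
    }

    func main() {
    	// Injection point -> ordered middleware list.
    	mws := map[string][]middlewareConfig{
    		"storage": {{
    			Name: "cloudfront",
    			Options: map[string]interface{}{
    				"baseurl":    "https://abcdefg.cloudfront.net/", // hypothetical
    				"privatekey": "/etc/docker/cloudfront.pem",      // hypothetical
    				"keypairid":  "ASDF1234",                        // hypothetical
    			},
    		}},
    	}
    	fmt.Println(len(mws["storage"]))
    }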
- panic(fmt.Sprintf("unable to configure repository middleware (%s): %v", mw.Name, err)) - } else if mw.Inject == "storage" { - smw, err := storagemiddleware.GetStorageMiddleware(mw.Name, mw.Options, app.driver) - - if err != nil { - panic(fmt.Sprintf("unable to configure storage middleware (%s): %v", mw.Name, err)) - } - app.driver = smw + for _, mw := range configuration.Middleware["storage"] { + smw, err := storagemiddleware.Get(mw.Name, mw.Options, app.driver) + if err != nil { + panic(fmt.Sprintf("unable to configure storage middleware (%s): %v", mw.Name, err)) } + app.driver = smw } return app @@ -256,6 +257,14 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Repository = notifications.Listen( repository, app.eventBridge(context, r)) + + for _, mw := range app.Config.Middleware["repository"] { + rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, context.Repository) + if err != nil { + panic(fmt.Sprintf("unable to configure repository middleware (%s): %s", mw.Name, err)) + } + context.Repository = rmw + } } handler := dispatch(context, r) diff --git a/docs/middleware/registry/middleware.go b/docs/middleware/registry/middleware.go new file mode 100644 index 00000000..1347b6da --- /dev/null +++ b/docs/middleware/registry/middleware.go @@ -0,0 +1,39 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// InitFunc is the type of a RegistryMiddleware factory function and is +// used to register the contsructor for different RegistryMiddleware backends. +type InitFunc func(registry distribution.Registry, options map[string]interface{}) (distribution.Registry, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RegistryMiddleware backend with the given name. +func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RegistryMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, registry distribution.Registry) (distribution.Registry, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(registry, options) + } + } + + return nil, fmt.Errorf("no registry middleware registered with name: %s", name) +} diff --git a/docs/middleware/repository/middleware.go b/docs/middleware/repository/middleware.go new file mode 100644 index 00000000..86c3b0a7 --- /dev/null +++ b/docs/middleware/repository/middleware.go @@ -0,0 +1,39 @@ +package middleware + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// InitFunc is the type of a RepositoryMiddleware factory function and is +// used to register the contsructor for different RepositoryMiddleware backends. +type InitFunc func(repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) + +var middlewares map[string]InitFunc + +// Register is used to register an InitFunc for +// a RepositoryMiddleware backend with the given name. 
+func Register(name string, initFunc InitFunc) error { + if middlewares == nil { + middlewares = make(map[string]InitFunc) + } + if _, exists := middlewares[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + middlewares[name] = initFunc + + return nil +} + +// Get constructs a RepositoryMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { + if middlewares != nil { + if initFunc, exists := middlewares[name]; exists { + return initFunc(repository, options) + } + } + + return nil, fmt.Errorf("no repository middleware registered with name: %s", name) +} diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go index d3c5e44f..2d155312 100644 --- a/docs/storage/driver/middleware/cloudfront/middleware.go +++ b/docs/storage/driver/middleware/cloudfront/middleware.go @@ -115,5 +115,5 @@ func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]in // init registers the cloudfront layerHandler backend. func init() { - storagemiddleware.RegisterStorageMiddleware("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) + storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) } diff --git a/docs/storage/driver/middleware/storagemiddleware.go b/docs/storage/driver/middleware/storagemiddleware.go index fb633164..d88ddd91 100644 --- a/docs/storage/driver/middleware/storagemiddleware.go +++ b/docs/storage/driver/middleware/storagemiddleware.go @@ -12,9 +12,9 @@ type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string var storageMiddlewares map[string]InitFunc -// RegisterStorageMiddleware is used to register an StorageMiddlewareInitFunc for +// Register is used to register an InitFunc for // a StorageMiddleware backend with the given name. -func RegisterStorageMiddleware(name string, initFunc InitFunc) error { +func Register(name string, initFunc InitFunc) error { if storageMiddlewares == nil { storageMiddlewares = make(map[string]InitFunc) } @@ -27,9 +27,8 @@ func RegisterStorageMiddleware(name string, initFunc InitFunc) error { return nil } -// GetStorageMiddleware constructs a StorageMiddleware -// with the given options using the named backend. -func GetStorageMiddleware(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { +// Get constructs a StorageMiddleware with the given options using the named backend. +func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { if storageMiddlewares != nil { if initFunc, exists := storageMiddlewares[name]; exists { return initFunc(storageDriver, options) From 6a72d1aefbecd6f85f469565b210d9021d131b4d Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Fri, 6 Mar 2015 09:48:25 -0800 Subject: [PATCH 052/501] Final polish to cloudfront and larger middleware refactor Signed-off-by: David Lawrence (github: endophage) --- docs/handlers/layer.go | 6 +++--- docs/storage/layerreader.go | 30 +++++++++++++++--------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index 9e0e440c..ae73aee0 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -49,8 +49,8 @@ type layerHandler struct { // response. 
func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(lh).Debug("GetImageLayer") - layerStore := lh.Repository.Layers() - layerReader, err := layerStore.Fetch(lh.Digest) + layers := lh.Repository.Layers() + layer, err := layers.Fetch(lh.Digest) if err != nil { switch err := err.(type) { @@ -63,5 +63,5 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { return } - layerReader.ServeHTTP(w, r) + layer.ServeHTTP(w, r) } diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index 20050f11..b9b05c5c 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -8,7 +8,7 @@ import ( "github.com/docker/distribution/digest" ) -// LayerRead implements Layer and provides facilities for reading and +// layerReader implements Layer and provides facilities for reading and // seeking. type layerReader struct { fileReader @@ -18,32 +18,32 @@ type layerReader struct { var _ distribution.Layer = &layerReader{} -func (lrs *layerReader) Path() string { - return lrs.path +func (lr *layerReader) Path() string { + return lr.path } -func (lrs *layerReader) Digest() digest.Digest { - return lrs.digest +func (lr *layerReader) Digest() digest.Digest { + return lr.digest } -func (lrs *layerReader) Length() int64 { - return lrs.size +func (lr *layerReader) Length() int64 { + return lr.size } -func (lrs *layerReader) CreatedAt() time.Time { - return lrs.modtime +func (lr *layerReader) CreatedAt() time.Time { + return lr.modtime } // Close the layer. Should be called when the resource is no longer needed. -func (lrs *layerReader) Close() error { - return lrs.closeWithErr(distribution.ErrLayerClosed) +func (lr *layerReader) Close() error { + return lr.closeWithErr(distribution.ErrLayerClosed) } -func (lrs *layerReader) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Docker-Content-Digest", lrs.digest.String()) +func (lr *layerReader) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Docker-Content-Digest", lr.digest.String()) - if url, err := lrs.fileReader.driver.URLFor(lrs.Path(), map[string]interface{}{}); err == nil { + if url, err := lr.fileReader.driver.URLFor(lr.Path(), map[string]interface{}{}); err == nil { http.Redirect(w, r, url, http.StatusTemporaryRedirect) } - http.ServeContent(w, r, lrs.Digest().String(), lrs.CreatedAt(), lrs) + http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) } From 5c3f53b70f8f576aca701eb07ef32b3abfdf7bd3 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Mon, 9 Mar 2015 12:42:23 -0400 Subject: [PATCH 053/501] Fix Godoc typos Signed-off-by: Andy Goldstein --- docs/auth/auth.go | 2 +- docs/middleware/registry/middleware.go | 2 +- docs/middleware/repository/middleware.go | 2 +- docs/storage/driver/middleware/storagemiddleware.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/auth/auth.go b/docs/auth/auth.go index cd6ee096..a8499342 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -110,7 +110,7 @@ func (uic userInfoContext) Value(key interface{}) interface{} { } // InitFunc is the type of an AccessController factory function and is used -// to register the contsructor for different AccesController backends. +// to register the constructor for different AccesController backends. 
type InitFunc func(options map[string]interface{}) (AccessController, error) var accessControllers map[string]InitFunc diff --git a/docs/middleware/registry/middleware.go b/docs/middleware/registry/middleware.go index 1347b6da..d3e88810 100644 --- a/docs/middleware/registry/middleware.go +++ b/docs/middleware/registry/middleware.go @@ -7,7 +7,7 @@ import ( ) // InitFunc is the type of a RegistryMiddleware factory function and is -// used to register the contsructor for different RegistryMiddleware backends. +// used to register the constructor for different RegistryMiddleware backends. type InitFunc func(registry distribution.Registry, options map[string]interface{}) (distribution.Registry, error) var middlewares map[string]InitFunc diff --git a/docs/middleware/repository/middleware.go b/docs/middleware/repository/middleware.go index 86c3b0a7..d6330fc4 100644 --- a/docs/middleware/repository/middleware.go +++ b/docs/middleware/repository/middleware.go @@ -7,7 +7,7 @@ import ( ) // InitFunc is the type of a RepositoryMiddleware factory function and is -// used to register the contsructor for different RepositoryMiddleware backends. +// used to register the constructor for different RepositoryMiddleware backends. type InitFunc func(repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) var middlewares map[string]InitFunc diff --git a/docs/storage/driver/middleware/storagemiddleware.go b/docs/storage/driver/middleware/storagemiddleware.go index d88ddd91..7e40a8dd 100644 --- a/docs/storage/driver/middleware/storagemiddleware.go +++ b/docs/storage/driver/middleware/storagemiddleware.go @@ -7,7 +7,7 @@ import ( ) // InitFunc is the type of a StorageMiddleware factory function and is -// used to register the contsructor for different StorageMiddleware backends. +// used to register the constructor for different StorageMiddleware backends. type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) var storageMiddlewares map[string]InitFunc From 83571e574c0b3fc05b9185adb3f63fffbb4525d4 Mon Sep 17 00:00:00 2001 From: David Lawrence Date: Mon, 9 Mar 2015 10:55:52 -0700 Subject: [PATCH 054/501] don't panic during a request when configuring repository middleware. Return a 500 with an appropriate error Signed-off-by: David Lawrence (github: endophage) --- docs/handlers/app.go | 71 ++++++++++++++++++++++++++----------- docs/storage/layerreader.go | 6 +--- 2 files changed, 52 insertions(+), 25 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 8cd7c739..1b5effbc 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -89,16 +89,17 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // a health check. 
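The applyXMiddleware helpers introduced below all share one shape: fold an ordered list of wrappers over a value, so the last configured middleware ends up outermost. A standalone illustration of that fold, with invented names:

    package main

    import "fmt"

    // wrapper decorates a string the way a middleware decorates a
    // driver, registry, or repository.
    type wrapper func(string) (string, error)

    // chain applies wrappers in configuration order; later entries wrap
    // the result of earlier ones, so the last one is outermost.
    func chain(v string, wrappers []wrapper) (string, error) {
    	for _, w := range wrappers {
    		var err error
    		if v, err = w(v); err != nil {
    			return "", fmt.Errorf("unable to configure middleware: %v", err)
    		}
    	}
    	return v, nil
    }

    func main() {
    	tag := func(name string) wrapper {
    		return func(v string) (string, error) { return name + "(" + v + ")", nil }
    	}
    	out, _ := chain("driver", []wrapper{tag("a"), tag("b")})
    	fmt.Println(out) // b(a(driver))
    }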
panic(err) } + app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) + if err != nil { + panic(err) + } app.configureEvents(&configuration) app.registry = storage.NewRegistryWithDriver(app.driver) - for _, mw := range configuration.Middleware["registry"] { - rmw, err := registrymiddleware.Get(mw.Name, mw.Options, app.registry) - if err != nil { - panic(fmt.Sprintf("unable to configure registry middleware (%s): %s", mw.Name, err)) - } - app.registry = rmw + app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) + if err != nil { + panic(err) } authType := configuration.Auth.Type() @@ -111,14 +112,6 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.accessController = accessController } - for _, mw := range configuration.Middleware["storage"] { - smw, err := storagemiddleware.Get(mw.Name, mw.Options, app.driver) - if err != nil { - panic(fmt.Sprintf("unable to configure storage middleware (%s): %v", mw.Name, err)) - } - app.driver = smw - } - return app } @@ -258,12 +251,13 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { repository, app.eventBridge(context, r)) - for _, mw := range app.Config.Middleware["repository"] { - rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, context.Repository) - if err != nil { - panic(fmt.Sprintf("unable to configure repository middleware (%s): %s", mw.Name, err)) - } - context.Repository = rmw + context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) + if err != nil { + ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + context.Errors.Push(v2.ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + serveJSON(w, context.Errors) + return } } @@ -433,3 +427,40 @@ func appendAccessRecords(records []auth.Access, method string, repo string) []au } return records } + +// applyRegistryMiddleware wraps a registry instance with the configured middlewares +func applyRegistryMiddleware(registry distribution.Registry, middlewares []configuration.Middleware) (distribution.Registry, error) { + for _, mw := range middlewares { + rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) + if err != nil { + return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) + } + registry = rmw + } + return registry, nil + +} + +// applyRepoMiddleware wraps a repository with the configured middlewares +func applyRepoMiddleware(repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { + for _, mw := range middlewares { + rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, repository) + if err != nil { + return nil, err + } + repository = rmw + } + return repository, nil +} + +// applyStorageMiddleware wraps a storage driver with the configured middlewares +func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { + for _, mw := range middlewares { + smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) + if err != nil { + return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) + } + driver = smw + } + return driver, nil +} diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index b9b05c5c..1129eb9e 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -18,10 +18,6 @@ type layerReader struct 
{ var _ distribution.Layer = &layerReader{} -func (lr *layerReader) Path() string { - return lr.path -} - func (lr *layerReader) Digest() digest.Digest { return lr.digest } @@ -42,7 +38,7 @@ func (lr *layerReader) Close() error { func (lr *layerReader) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("Docker-Content-Digest", lr.digest.String()) - if url, err := lr.fileReader.driver.URLFor(lr.Path(), map[string]interface{}{}); err == nil { + if url, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{}); err == nil { http.Redirect(w, r, url, http.StatusTemporaryRedirect) } http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) From 4b5af16fdc2e7cfe8d2364e033164b710d8482fa Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Wed, 11 Mar 2015 15:10:49 -0400 Subject: [PATCH 055/501] Send WWW-Authenticate header for silly auth Signed-off-by: Andy Goldstein --- docs/auth/silly/access.go | 2 +- docs/handlers/app_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go index 20448efd..134b0ae5 100644 --- a/docs/auth/silly/access.go +++ b/docs/auth/silly/access.go @@ -82,7 +82,7 @@ func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { header = fmt.Sprintf("%s,scope=%q", header, ch.scope) } - w.Header().Set("Authorization", header) + w.Header().Set("WWW-Authenticate", header) w.WriteHeader(http.StatusUnauthorized) } diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 80f92490..cd515dd0 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -188,8 +188,8 @@ func TestNewApp(t *testing.T) { } expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" - if req.Header.Get("Authorization") != expectedAuthHeader { - t.Fatalf("unexpected authorization header: %q != %q", req.Header.Get("Authorization"), expectedAuthHeader) + if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { + t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) } var errs v2.Errors From fdd631477622bdb475a6078007ea2975b2231175 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Thu, 12 Mar 2015 17:06:40 -0700 Subject: [PATCH 056/501] Insert request method option storage driver URLFor Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/storage/layerreader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index 1129eb9e..9d6d8c8a 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -38,7 +38,7 @@ func (lr *layerReader) Close() error { func (lr *layerReader) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("Docker-Content-Digest", lr.digest.String()) - if url, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{}); err == nil { + if url, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{"method": r.Method}); err == nil { http.Redirect(w, r, url, http.StatusTemporaryRedirect) } http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) From 6d1401936821950867515244647d995dde261390 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Thu, 12 Mar 2015 19:31:41 -0700 Subject: [PATCH 057/501] Refactor Layer interface to return a Handler ... Rather than ServeHTTP directly. 
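On the silly auth fix in this patch: authentication challenges belong in the WWW-Authenticate response header per RFC 7235, while Authorization is a request header, so the old code was setting the wrong side of the exchange. A self-contained sketch of the corrected challenge flow (the realm and service values are illustrative):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    )

    func challenge(w http.ResponseWriter, realm, service string) {
    	// The challenge goes in WWW-Authenticate on the 401 response.
    	w.Header().Set("WWW-Authenticate", fmt.Sprintf("Bearer realm=%q,service=%q", realm, service))
    	w.WriteHeader(http.StatusUnauthorized)
    }

    func main() {
    	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		if r.Header.Get("Authorization") == "" {
    			challenge(w, "realm-test", "service-test")
    			return
    		}
    		fmt.Fprint(w, "{}")
    	})

    	rec := httptest.NewRecorder()
    	h.ServeHTTP(rec, httptest.NewRequest("GET", "/v2/", nil))
    	fmt.Println(rec.Code, rec.Header().Get("WWW-Authenticate"))
    }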
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/handlers/layer.go | 9 ++++++++- docs/storage/driver/storagedriver.go | 6 +++--- docs/storage/layerreader.go | 29 +++++++++++++++++++++++----- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index ae73aee0..b8230135 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -63,5 +63,12 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { return } - layer.ServeHTTP(w, r) + handler, err := layer.Handler(r) + if err != nil { + ctxu.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err) + lh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + handler.ServeHTTP(w, r) } diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index dd8fb4a0..f0fe7fef 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -73,7 +73,7 @@ type StorageDriver interface { // URLFor returns a URL which may be used to retrieve the content stored at // the given path, possibly using the given options. - // May return an UnsupportedMethodErr in certain StorageDriver + // May return an ErrUnsupportedMethod in certain StorageDriver // implementations. URLFor(path string, options map[string]interface{}) (string, error) } @@ -85,8 +85,8 @@ type StorageDriver interface { // hyphen. var PathRegexp = regexp.MustCompile(`^(/[a-z0-9._-]+)+$`) -// UnsupportedMethodErr may be returned in the case where a StorageDriver implementation does not support an optional method. -var ErrUnsupportedMethod = errors.New("Unsupported method") +// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. +var ErrUnsupportedMethod = errors.New("unsupported method") // PathNotFoundError is returned when operating on a nonexistent path. type PathNotFoundError struct { diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index 9d6d8c8a..414951d9 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -6,6 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" ) // layerReader implements Layer and provides facilities for reading and @@ -35,11 +36,29 @@ func (lr *layerReader) Close() error { return lr.closeWithErr(distribution.ErrLayerClosed) } -func (lr *layerReader) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Docker-Content-Digest", lr.digest.String()) +func (lr *layerReader) Handler(r *http.Request) (h http.Handler, err error) { + var handlerFunc http.HandlerFunc - if url, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{"method": r.Method}); err == nil { - http.Redirect(w, r, url, http.StatusTemporaryRedirect) + redirectURL, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{"method": r.Method}) + + switch err { + case nil: + handlerFunc = func(w http.ResponseWriter, r *http.Request) { + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + } + case driver.ErrUnsupportedMethod: + handlerFunc = func(w http.ResponseWriter, r *http.Request) { + // Fallback to serving the content directly. + http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) + } + default: + // Some unexpected error. 
+ return nil, err } - http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Docker-Content-Digest", lr.digest.String()) + handlerFunc.ServeHTTP(w, r) + }), nil } From 594f733e03e9e21153457eff5ccf5d5eb32fa033 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Mon, 23 Mar 2015 18:20:06 -0700 Subject: [PATCH 058/501] storage/driver/azure: Allow non-default realms This enables Azure storage driver to be used with non-default cloud endpoints like Azure China or Azure Government that does not use `.blob.core.windows.net` FQDN suffix. Signed-off-by: Ahmet Alp Balkan --- docs/storage/driver/azure/azure.go | 14 ++++++++++---- docs/storage/driver/azure/azure_test.go | 6 +++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index 6ccbff40..57d8acab 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -24,6 +24,7 @@ const ( paramAccountName = "accountname" paramAccountKey = "accountkey" paramContainer = "container" + paramRealm = "realm" ) type driver struct { @@ -64,12 +65,17 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("No %s parameter provided", paramContainer) } - return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container)) + realm, ok := parameters[paramRealm] + if !ok || fmt.Sprint(realm) == "" { + realm = azure.DefaultBaseUrl + } + + return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) } // New constructs a new Driver with the given Azure Storage Account credentials -func New(accountName, accountKey, container string) (*Driver, error) { - api, err := azure.NewBasicClient(accountName, accountKey) +func New(accountName, accountKey, container, realm string) (*Driver, error) { + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultApiVersion, true) if err != nil { return nil, err } @@ -343,5 +349,5 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { func is404(err error) bool { e, ok := err.(azure.StorageServiceError) - return ok && e.StatusCode == 404 + return ok && e.StatusCode == http.StatusNotFound } diff --git a/docs/storage/driver/azure/azure_test.go b/docs/storage/driver/azure/azure_test.go index a8fdf3e9..4990ba19 100644 --- a/docs/storage/driver/azure/azure_test.go +++ b/docs/storage/driver/azure/azure_test.go @@ -15,6 +15,7 @@ const ( envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" envContainer = "AZURE_STORAGE_CONTAINER" + envRealm = "AZURE_STORAGE_REALM" ) // Hook up gocheck into the "go test" runner. 
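The practical effect of the realm change above is that sovereign Azure clouds become reachable by passing their service suffix in place of the default (azure.DefaultBaseUrl, the public cloud). A hedged wiring example; the credentials are fake and the China-cloud suffix is shown only as a plausible value:

    package main

    import (
    	"fmt"

    	azuredriver "github.com/docker/distribution/registry/storage/driver/azure"
    )

    func main() {
    	// The fourth argument is the new realm parameter; the default
    	// would be azure.DefaultBaseUrl. "core.chinacloudapi.cn" targets
    	// Azure China rather than the public cloud.
    	d, err := azuredriver.New("myaccount", "bXlrZXk=", "mycontainer", "core.chinacloudapi.cn")
    	if err != nil {
    		fmt.Println("driver init failed:", err)
    		return
    	}
    	_ = d // ready for use as a storagedriver.StorageDriver
    }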
@@ -25,6 +26,7 @@ func init() { accountName string accountKey string container string + realm string ) config := []struct { @@ -34,6 +36,7 @@ func init() { {envAccountName, &accountName}, {envAccountKey, &accountKey}, {envContainer, &container}, + {envRealm, &realm}, } missing := []string{} @@ -45,7 +48,7 @@ func init() { } azureDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(accountName, accountKey, container) + return New(accountName, accountKey, container, realm) } // Skip Azure storage driver tests if environment variable parameters are not provided @@ -61,5 +64,6 @@ func init() { // paramAccountName: accountName, // paramAccountKey: accountKey, // paramContainer: container, + // paramRealm: realm, // }, skipCheck) } From dffd1babd2e95763302bd5aedbba3ab0b88a2260 Mon Sep 17 00:00:00 2001 From: "Frederick F. Kautz IV" Date: Mon, 23 Mar 2015 21:57:24 -0700 Subject: [PATCH 059/501] Updating MSOpenTech/azure-sdk-for-go to latest master Signed-off-by: Frederick F. Kautz IV --- docs/storage/driver/azure/azure.go | 2 +- docs/storage/driver/azure/blockblob.go | 2 +- docs/storage/driver/azure/blockblob_test.go | 2 +- docs/storage/driver/azure/blockid.go | 2 +- docs/storage/driver/azure/blockid_test.go | 2 +- docs/storage/driver/azure/randomwriter.go | 2 +- docs/storage/driver/azure/randomwriter_test.go | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index 6ccbff40..20ed2e34 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -15,7 +15,7 @@ import ( "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) const driverName = "azure" diff --git a/docs/storage/driver/azure/blockblob.go b/docs/storage/driver/azure/blockblob.go index d868453f..10b2bf21 100644 --- a/docs/storage/driver/azure/blockblob.go +++ b/docs/storage/driver/azure/blockblob.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) // azureBlockStorage is adaptor between azure.BlobStorageClient and diff --git a/docs/storage/driver/azure/blockblob_test.go b/docs/storage/driver/azure/blockblob_test.go index f1e39027..c29b4742 100644 --- a/docs/storage/driver/azure/blockblob_test.go +++ b/docs/storage/driver/azure/blockblob_test.go @@ -6,7 +6,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) type StorageSimulator struct { diff --git a/docs/storage/driver/azure/blockid.go b/docs/storage/driver/azure/blockid.go index 61f41ebc..f6bda6a8 100644 --- a/docs/storage/driver/azure/blockid.go +++ b/docs/storage/driver/azure/blockid.go @@ -7,7 +7,7 @@ import ( "sync" "time" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) type blockIDGenerator struct { diff --git a/docs/storage/driver/azure/blockid_test.go b/docs/storage/driver/azure/blockid_test.go index 46d52a34..6569e15d 100644 --- a/docs/storage/driver/azure/blockid_test.go +++ b/docs/storage/driver/azure/blockid_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure 
"github.com/MSOpenTech/azure-sdk-for-go/storage" ) func Test_blockIdGenerator(t *testing.T) { diff --git a/docs/storage/driver/azure/randomwriter.go b/docs/storage/driver/azure/randomwriter.go index c89dd0a3..b570d559 100644 --- a/docs/storage/driver/azure/randomwriter.go +++ b/docs/storage/driver/azure/randomwriter.go @@ -5,7 +5,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) // blockStorage is the interface required from a block storage service diff --git a/docs/storage/driver/azure/randomwriter_test.go b/docs/storage/driver/azure/randomwriter_test.go index 5201e3b4..2c7480db 100644 --- a/docs/storage/driver/azure/randomwriter_test.go +++ b/docs/storage/driver/azure/randomwriter_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" + azure "github.com/MSOpenTech/azure-sdk-for-go/storage" ) func TestRandomWriter_writeChunkToBlocks(t *testing.T) { From 38ae1cb4613e68e32025493459239519ea66ec59 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 1 Apr 2015 16:27:24 -0700 Subject: [PATCH 060/501] Add redis pool to registry webapp Redis has been integrated with the web application for use with various services. The configuraiton exposes connection details, timeouts and pool parameters. Documentation has been updated accordingly. A few convenience methods have been added to the context package to get loggers with certain fields, exposing some missing functionality from logrus. Signed-off-by: Stephen J Day --- docs/handlers/app.go | 83 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 1b5effbc..f837e861 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -1,10 +1,12 @@ package handlers import ( + "expvar" "fmt" "net" "net/http" "os" + "time" "code.google.com/p/go-uuid/uuid" "github.com/docker/distribution" @@ -19,6 +21,7 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" "golang.org/x/net/context" ) @@ -44,6 +47,8 @@ type App struct { sink notifications.Sink source notifications.SourceRecord } + + redis *redis.Pool } // Value intercepts calls context.Context.Value, returning the current app id, @@ -95,6 +100,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App } app.configureEvents(&configuration) + app.configureRedis(&configuration) app.registry = storage.NewRegistryWithDriver(app.driver) app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) @@ -174,6 +180,83 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { } } +func (app *App) configureRedis(configuration *configuration.Configuration) { + if configuration.Redis.Addr == "" { + ctxu.GetLogger(app).Infof("redis not configured") + return + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. 
+ ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) + + done := func(err error) { + logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", + ctxu.Since(ctx, "redis.connect.startedat")) + if err != nil { + logger.Errorf("redis: error connecting: %v", err) + } else { + logger.Infof("redis: connect %v", configuration.Redis.Addr) + } + } + + conn, err := redis.DialTimeout("tcp", + configuration.Redis.Addr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) + if err != nil { + ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", + configuration.Redis.Addr, err) + done(err) + return nil, err + } + + // authorize the connection + if configuration.Redis.Password != "" { + if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + // select the database to use + if configuration.Redis.DB != 0 { + if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { + defer conn.Close() + done(err) + return nil, err + } + } + + done(nil) + return conn, nil + }, + MaxIdle: configuration.Redis.Pool.MaxIdle, + MaxActive: configuration.Redis.Pool.MaxActive, + IdleTimeout: configuration.Redis.Pool.IdleTimeout, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + // TODO(stevvooe): We can probably do something more interesting + // here with the health package. + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not avialable, proceed without cache. + } + + app.redis = pool + + expvar.Publish("redis", expvar.Func(func() interface{} { + return map[string]interface{}{ + "Config": configuration.Redis, + "Active": app.redis.ActiveCount(), + } + })) + +} + func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. From 6eb804a1ecfde5366ae05464776b748210754f0c Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 1 Apr 2015 18:57:59 -0700 Subject: [PATCH 061/501] Stronger validation for uuid field in urls This change adds strong validation for the uuid variable for v2 routes. This is a minor specification change but is okay since the uuid field is controlled by the server. The character set is restricted to avoid path traversal, allowing for alphanumeric values and urlsafe base64 encoding. This change has no effect on client implementations. Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 4 ++-- docs/api/v2/routes_test.go | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 5f091bbc..73f8b463 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -28,7 +28,7 @@ var ( Name: "uuid", Type: "opaque", Required: true, - Description: `A uuid identifying the upload. This field can accept almost anything.`, + Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", } digestPathParameter = ParameterDescriptor{ @@ -985,7 +985,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}", + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. 
Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index afab71fc..fb268336 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -98,6 +98,7 @@ func TestRouter(t *testing.T) { }, }, { + // support uuid proper RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", Vars: map[string]string{ @@ -113,6 +114,21 @@ func TestRouter(t *testing.T) { "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", }, }, + { + // supports urlsafe base64 + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", + }, + }, + { + // does not match + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", + StatusCode: http.StatusNotFound, + }, { // Check ambiguity: ensure we can distinguish between tags for // "foo/bar/image/image" and image for "foo/bar/image" with tag From 06acde06cb89fcc944666806528f48b3ad88d729 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 1 Apr 2015 18:45:13 -0700 Subject: [PATCH 062/501] Avoid crash on invalid Move arguments This change prevents a crash when moving from a non-existent directory that has a file as a parent. To prevent this, we simply check that the node is a directory and throw an error if it is not. Signed-off-by: Stephen J Day --- docs/storage/driver/inmemory/mfs.go | 9 +++++++-- docs/storage/driver/testsuites/testsuites.go | 15 ++++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/inmemory/mfs.go b/docs/storage/driver/inmemory/mfs.go index 2bf859bc..cdefacfd 100644 --- a/docs/storage/driver/inmemory/mfs.go +++ b/docs/storage/driver/inmemory/mfs.go @@ -212,12 +212,17 @@ func (d *dir) move(src, dst string) error { return errNotExists } - s, ok := sp.(*dir).children[srcFilename] + spd, ok := sp.(*dir) + if !ok { + return errIsNotDir // paranoid. + } + + s, ok := spd.children[srcFilename] if !ok { return errNotExists } - delete(sp.(*dir).children, srcFilename) + delete(spd.children, srcFilename) switch n := s.(type) { case *dir: diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index cfa3a48a..18fd9840 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -15,7 +15,6 @@ import ( "time" storagedriver "github.com/docker/distribution/registry/storage/driver" - "gopkg.in/check.v1" ) @@ -591,6 +590,20 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { c.Assert(received, check.DeepEquals, contents) } +// TestMoveInvalid provides various checks for invalid moves. +func (suite *DriverSuite) TestMoveInvalid(c *check.C) { + contents := randomContents(32) + + // Create a regular file. + err := suite.StorageDriver.PutContent("/notadir", contents) + c.Assert(err, check.IsNil) + defer suite.StorageDriver.Delete("/notadir") + + // Now try to move a non-existent file under it. 
+ err = suite.StorageDriver.Move("/notadir/foo", "/notadir/bar") + c.Assert(err, check.NotNil) // non-nil error +} + // TestDelete checks that the delete operation removes data from the storage // driver func (suite *DriverSuite) TestDelete(c *check.C) { From b96de45be83506f195903c7ab85d61a1003d5b96 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Tue, 24 Mar 2015 10:35:01 -0700 Subject: [PATCH 063/501] Use resumable digest for efficient upload finish By using a resumable digester and storing the state of upload digests between subsequent upload chunks, finalizing an upload no longer requires reading back all of the uploaded data to verify the client's expected digest. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/storage/layerstore.go | 1 + docs/storage/layerwriter.go | 228 ++++++++++++++++++++++++++++++++---- docs/storage/paths.go | 22 ++++ 3 files changed, 226 insertions(+), 25 deletions(-) diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index 05881749..77c235aa 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -142,6 +142,7 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di layerStore: ls, uuid: uuid, startedAt: startedAt, + resumableDigester: digest.NewCanonicalResumableDigester(), bufferedFileWriter: *fw, }, nil } diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index 27bbade1..ccd8679b 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -3,7 +3,9 @@ package storage import ( "fmt" "io" + "os" "path" + "strconv" "time" "github.com/Sirupsen/logrus" @@ -20,10 +22,11 @@ var _ distribution.LayerUpload = &layerWriter{} type layerWriter struct { layerStore *layerStore - uuid string - startedAt time.Time + uuid string + startedAt time.Time + resumableDigester digest.ResumableDigester - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisy + // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy // LayerUpload Interface bufferedFileWriter } @@ -83,37 +86,212 @@ func (lw *layerWriter) Cancel() error { return nil } +func (lw *layerWriter) Write(p []byte) (int, error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := lw.resumeHashAt(lw.offset); err != nil { + return 0, err + } + + return io.MultiWriter(&lw.bufferedFileWriter, lw.resumableDigester).Write(p) +} + +func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) { + // Ensure that the current write offset matches how many bytes have been + // written to the digester. If not, we need to update the digest state to + // match the current write position. + if err := lw.resumeHashAt(lw.offset); err != nil { + return 0, err + } + + return lw.bufferedFileWriter.ReadFrom(io.TeeReader(r, lw.resumableDigester)) +} + +func (lw *layerWriter) Close() error { + if err := lw.storeHashState(); err != nil { + return err + } + + return lw.bufferedFileWriter.Close() +} + +type hashStateEntry struct { + offset int64 + path string +} + +// getStoredHashStates returns a slice of hashStateEntries for this upload. 
+func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) { + uploadHashStatePathPrefix, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ + name: lw.layerStore.repository.Name(), + uuid: lw.uuid, + alg: lw.resumableDigester.Digest().Algorithm(), + list: true, + }) + if err != nil { + return nil, err + } + + paths, err := lw.driver.List(uploadHashStatePathPrefix) + if err != nil { + if _, ok := err.(storagedriver.PathNotFoundError); !ok { + return nil, err + } + // Treat PathNotFoundError as no entries. + paths = nil + } + + hashStateEntries := make([]hashStateEntry, 0, len(paths)) + + for _, p := range paths { + pathSuffix := path.Base(p) + // The suffix should be the offset. + offset, err := strconv.ParseInt(pathSuffix, 0, 64) + if err != nil { + logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) + } + + hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) + } + + return hashStateEntries, nil +} + +// resumeHashAt attempts to restore the state of the internal hash function +// by loading the most recent saved hash state less than or equal to the given +// offset. Any unhashed bytes remaining less than the given offset are hashed +// from the content uploaded so far. +func (lw *layerWriter) resumeHashAt(offset int64) error { + if offset < 0 { + return fmt.Errorf("cannot resume hash at negative offset: %d", offset) + } + + if offset == int64(lw.resumableDigester.Len()) { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := lw.getStoredHashStates() + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset less than or equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { + // This offset is closer to the requested offset. + hashStateMatch = hashState + } else if hashState.offset > offset { + // Remove any stored hash state with offsets higher than this one + // as writes to this resumed hasher will make those invalid. This + // is probably okay to skip for now since we don't expect anyone to + // use the API in this way. For that reason, we don't treat an + // error here as a fatal error, but only log it. + if err := lw.driver.Delete(hashState.path); err != nil { + logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) + } + } + } + + if hashStateMatch.offset == 0 { + // No need to load any state, just reset the hasher. + lw.resumableDigester.Reset() + } else { + storedState, err := lw.driver.GetContent(hashStateMatch.path) + if err != nil { + return err + } + + if err = lw.resumableDigester.Restore(storedState); err != nil { + return err + } + } + + // Mind the gap. + if gapLen := offset - int64(lw.resumableDigester.Len()); gapLen > 0 { + // Need to read content from the upload to catch up to the desired + // offset. 
+ fr, err := newFileReader(lw.driver, lw.path) + if err != nil { + return err + } + + if _, err = fr.Seek(int64(lw.resumableDigester.Len()), os.SEEK_SET); err != nil { + return fmt.Errorf("unable to seek to layer reader offset %d: %s", lw.resumableDigester.Len(), err) + } + + if _, err := io.CopyN(lw.resumableDigester, fr, gapLen); err != nil { + return err + } + } + + return nil +} + +func (lw *layerWriter) storeHashState() error { + uploadHashStatePath, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ + name: lw.layerStore.repository.Name(), + uuid: lw.uuid, + alg: lw.resumableDigester.Digest().Algorithm(), + offset: int64(lw.resumableDigester.Len()), + }) + if err != nil { + return err + } + + hashState, err := lw.resumableDigester.State() + if err != nil { + return err + } + + return lw.driver.PutContent(uploadHashStatePath, hashState) +} + // validateLayer checks the layer data against the digest, returning an error // if it does not match. The canonical digest is returned. func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { - digestVerifier, err := digest.NewDigestVerifier(dgst) - if err != nil { + // Restore the hasher state to the end of the upload. + if err := lw.resumeHashAt(lw.size); err != nil { return "", err } - // TODO(stevvooe): Store resumable hash calculations in upload directory - // in driver. Something like a file at path /resumablehash/ - // with the hash state up to that point would be perfect. The hasher would - // then only have to fetch the difference. + var verified bool + canonical := lw.resumableDigester.Digest() - // Read the file from the backend driver and validate it. - fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) - if err != nil { - return "", err + if canonical.Algorithm() == dgst.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = dgst == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + digestVerifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return "", err + } + + // Read the file from the backend driver and validate it. + fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path) + if err != nil { + return "", err + } + + if _, err = io.Copy(digestVerifier, fr); err != nil { + return "", err + } + + verified = digestVerifier.Verified() } - tr := io.TeeReader(fr, digestVerifier) - - // TODO(stevvooe): This is one of the places we need a Digester write - // sink. Instead, its read driven. This might be okay. - - // Calculate an updated digest with the latest version. 
- canonical, err := digest.FromReader(tr) - if err != nil { - return "", err - } - - if !digestVerifier.Verified() { + if !verified { return "", distribution.ErrLayerInvalidDigest{ Digest: dgst, Reason: fmt.Errorf("content does not match digest"), diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 179e7b78..f541f079 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -33,6 +33,7 @@ const storagePathVersion = "v2" // -> _uploads/ // data // startedat +// hashstates// // -> blob/ // // @@ -87,6 +88,7 @@ const storagePathVersion = "v2" // // uploadDataPathSpec: /v2/repositories//_uploads//data // uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat +// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// // // Blob Store: // @@ -249,6 +251,12 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "data")...), nil case uploadStartedAtPathSpec: return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "startedat")...), nil + case uploadHashStatePathSpec: + offset := fmt.Sprintf("%d", v.offset) + if v.list { + offset = "" // Limit to the prefix for listing offsets. + } + return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "hashstates", v.alg, offset)...), nil default: // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). return "", fmt.Errorf("unknown path spec: %#v", v) @@ -424,6 +432,20 @@ type uploadStartedAtPathSpec struct { func (uploadStartedAtPathSpec) pathSpec() {} +// uploadHashStatePathSpec defines the path parameters for the file that stores +// the hash function state of an upload at a specific byte offset. If `list` is +// set, then the path mapper will generate a list prefix for all hash state +// offsets for the upload identified by the name, uuid, and alg. +type uploadHashStatePathSpec struct { + name string + uuid string + alg string + offset int64 + list bool +} + +func (uploadHashStatePathSpec) pathSpec() {} + // digestPathComponents provides a consistent path breakdown for a given // digest. For a generic digest, it will be as follows: // From a7c2dceea5f40dc14ad4b0e2facebfb3fecbcd91 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 1 Apr 2015 16:30:00 -0700 Subject: [PATCH 064/501] Define and implement layer info cache This changeset defines the interface for layer info caches. Layer info caches speed up access to layer meta data accessed in storage driver backends. The two main operations are tests for repository membership and resolving path and size information for backend blobs. Two implementations are available. The main implementation leverages redis to store layer info. An alternative implementation simply caches layer info in maps, which should speed up resolution for less sophisticated implementations. 
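To make the intended call pattern concrete, here is a minimal usage
sketch against the interface introduced below. The repository name,
digest and meta values are placeholders borrowed from the tests, and
the import paths assume the layout used elsewhere in this series:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/storage/cache"
        "golang.org/x/net/context"
    )

    func main() {
        ctx := context.Background()
        lic := cache.NewInMemoryLayerInfoCache()

        // After a successful backend lookup, record repository membership
        // and the blob's backend location.
        if err := lic.Add(ctx, "foo/bar", "fake:abc"); err != nil {
            fmt.Println("add:", err)
        }
        if err := lic.SetMeta(ctx, "fake:abc", cache.LayerMeta{Path: "/placeholder/path", Length: 20}); err != nil {
            fmt.Println("set meta:", err)
        }

        // Subsequent requests consult the cache first, avoiding a backend
        // round trip when the layer is already known.
        if ok, err := lic.Contains(ctx, "foo/bar", "fake:abc"); err == nil && ok {
            if meta, err := lic.Meta(ctx, "fake:abc"); err == nil {
                fmt.Println(meta.Path, meta.Length)
            }
        }
    }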
Signed-off-by: Stephen J Day --- docs/storage/cache/cache.go | 98 +++++++++++++++++++++++++++++++ docs/storage/cache/cache_test.go | 86 +++++++++++++++++++++++++++ docs/storage/cache/memory.go | 63 ++++++++++++++++++++ docs/storage/cache/memory_test.go | 9 +++ docs/storage/cache/redis.go | 98 +++++++++++++++++++++++++++++++ docs/storage/cache/redis_test.go | 50 ++++++++++++++++ 6 files changed, 404 insertions(+) create mode 100644 docs/storage/cache/cache.go create mode 100644 docs/storage/cache/cache_test.go create mode 100644 docs/storage/cache/memory.go create mode 100644 docs/storage/cache/memory_test.go create mode 100644 docs/storage/cache/redis.go create mode 100644 docs/storage/cache/redis_test.go diff --git a/docs/storage/cache/cache.go b/docs/storage/cache/cache.go new file mode 100644 index 00000000..a21cefd5 --- /dev/null +++ b/docs/storage/cache/cache.go @@ -0,0 +1,98 @@ +// Package cache provides facilities to speed up access to the storage +// backend. Typically cache implementations deal with internal implementation +// details at the backend level, rather than generalized caches for +// distribution related interfaces. In other words, unless the cache is +// specific to the storage package, it belongs in another package. +package cache + +import ( + "fmt" + + "github.com/docker/distribution/digest" + "golang.org/x/net/context" +) + +// ErrNotFound is returned when a meta item is not found. +var ErrNotFound = fmt.Errorf("not found") + +// LayerMeta describes the backend location and length of layer data. +type LayerMeta struct { + Path string + Length int64 +} + +// LayerInfoCache is a driver-aware cache of layer metadata. Basically, it +// provides a fast cache for checks against repository metadata, avoiding +// round trips to backend storage. Note that this is different from a pure +// layer cache, which would also provide access to backing data, as well. Such +// a cache should be implemented as a middleware, rather than integrated with +// the storage backend. +// +// Note that most implementations rely on the caller to do strict checks on on +// repo and dgst arguments, since these are mostly used behind existing +// implementations. +type LayerInfoCache interface { + // Contains returns true if the repository with name contains the layer. + Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) + + // Add includes the layer in the given repository cache. + Add(ctx context.Context, repo string, dgst digest.Digest) error + + // Meta provides the location of the layer on the backend and its size. Membership of a + // repository should be tested before using the result, if required. + Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) + + // SetMeta sets the meta data for the given layer. + SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error +} + +// base implements common checks between cache implementations. Note that +// these are not full checks of input, since that should be done by the +// caller. 
+type base struct { + LayerInfoCache +} + +func (b *base) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { + if repo == "" { + return false, fmt.Errorf("cache: cannot check for empty repository name") + } + + if dgst == "" { + return false, fmt.Errorf("cache: cannot check for empty digests") + } + + return b.LayerInfoCache.Contains(ctx, repo, dgst) +} + +func (b *base) Add(ctx context.Context, repo string, dgst digest.Digest) error { + if repo == "" { + return fmt.Errorf("cache: cannot add empty repository name") + } + + if dgst == "" { + return fmt.Errorf("cache: cannot add empty digest") + } + + return b.LayerInfoCache.Add(ctx, repo, dgst) +} + +func (b *base) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { + if dgst == "" { + return LayerMeta{}, fmt.Errorf("cache: cannot get meta for empty digest") + } + + return b.LayerInfoCache.Meta(ctx, dgst) +} + +func (b *base) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { + if dgst == "" { + return fmt.Errorf("cache: cannot set meta for empty digest") + } + + if meta.Path == "" { + return fmt.Errorf("cache: cannot set empty path for meta") + } + + return b.LayerInfoCache.SetMeta(ctx, dgst, meta) +} diff --git a/docs/storage/cache/cache_test.go b/docs/storage/cache/cache_test.go new file mode 100644 index 00000000..48cef955 --- /dev/null +++ b/docs/storage/cache/cache_test.go @@ -0,0 +1,86 @@ +package cache + +import ( + "testing" + + "golang.org/x/net/context" +) + +// checkLayerInfoCache takes a cache implementation through a common set of +// operations. If adding new tests, please add them here so new +// implementations get the benefit. +func checkLayerInfoCache(t *testing.T, lic LayerInfoCache) { + ctx := context.Background() + + exists, err := lic.Contains(ctx, "", "fake:abc") + if err == nil { + t.Fatalf("expected error checking for cache item with empty repo") + } + + exists, err = lic.Contains(ctx, "foo/bar", "") + if err == nil { + t.Fatalf("expected error checking for cache item with empty digest") + } + + exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") + if err != nil { + t.Fatalf("unexpected error checking for cache item: %v", err) + } + + if exists { + t.Fatalf("item should not exist") + } + + if err := lic.Add(ctx, "", "fake:abc"); err == nil { + t.Fatalf("expected error adding cache item with empty name") + } + + if err := lic.Add(ctx, "foo/bar", ""); err == nil { + t.Fatalf("expected error adding cache item with empty digest") + } + + if err := lic.Add(ctx, "foo/bar", "fake:abc"); err != nil { + t.Fatalf("unexpected error adding item: %v", err) + } + + exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") + if err != nil { + t.Fatalf("unexpected error checking for cache item: %v", err) + } + + if !exists { + t.Fatalf("item should exist") + } + + _, err = lic.Meta(ctx, "") + if err == nil || err == ErrNotFound { + t.Fatalf("expected error getting meta for cache item with empty digest") + } + + _, err = lic.Meta(ctx, "fake:abc") + if err != ErrNotFound { + t.Fatalf("expected unknown layer error getting meta for cache item with empty digest") + } + + if err = lic.SetMeta(ctx, "", LayerMeta{}); err == nil { + t.Fatalf("expected error setting meta for cache item with empty digest") + } + + if err = lic.SetMeta(ctx, "foo/bar", LayerMeta{}); err == nil { + t.Fatalf("expected error setting meta for cache item with empty meta") + } + + expected := LayerMeta{Path: "/foo/bar", Length: 20} + if err := lic.SetMeta(ctx, "foo/bar", expected); err != nil { + 
t.Fatalf("unexpected error setting meta: %v", err) + } + + meta, err := lic.Meta(ctx, "foo/bar") + if err != nil { + t.Fatalf("unexpected error getting meta: %v", err) + } + + if meta != expected { + t.Fatalf("retrieved meta data did not match: %v", err) + } +} diff --git a/docs/storage/cache/memory.go b/docs/storage/cache/memory.go new file mode 100644 index 00000000..6d949792 --- /dev/null +++ b/docs/storage/cache/memory.go @@ -0,0 +1,63 @@ +package cache + +import ( + "github.com/docker/distribution/digest" + "golang.org/x/net/context" +) + +// inmemoryLayerInfoCache is a map-based implementation of LayerInfoCache. +type inmemoryLayerInfoCache struct { + membership map[string]map[digest.Digest]struct{} + meta map[digest.Digest]LayerMeta +} + +// NewInMemoryLayerInfoCache provides an implementation of LayerInfoCache that +// stores results in memory. +func NewInMemoryLayerInfoCache() LayerInfoCache { + return &base{&inmemoryLayerInfoCache{ + membership: make(map[string]map[digest.Digest]struct{}), + meta: make(map[digest.Digest]LayerMeta), + }} +} + +func (ilic *inmemoryLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { + members, ok := ilic.membership[repo] + if !ok { + return false, nil + } + + _, ok = members[dgst] + return ok, nil +} + +// Add adds the layer to the redis repository blob set. +func (ilic *inmemoryLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { + members, ok := ilic.membership[repo] + if !ok { + members = make(map[digest.Digest]struct{}) + ilic.membership[repo] = members + } + + members[dgst] = struct{}{} + + return nil +} + +// Meta retrieves the layer meta data from the redis hash, returning +// ErrUnknownLayer if not found. +func (ilic *inmemoryLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { + meta, ok := ilic.meta[dgst] + if !ok { + return LayerMeta{}, ErrNotFound + } + + return meta, nil +} + +// SetMeta sets the meta data for the given digest using a redis hash. A hash +// is used here since we may store unrelated fields about a layer in the +// future. +func (ilic *inmemoryLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { + ilic.meta[dgst] = meta + return nil +} diff --git a/docs/storage/cache/memory_test.go b/docs/storage/cache/memory_test.go new file mode 100644 index 00000000..417e982e --- /dev/null +++ b/docs/storage/cache/memory_test.go @@ -0,0 +1,9 @@ +package cache + +import "testing" + +// TestInMemoryLayerInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryLayerInfoCache(t *testing.T) { + checkLayerInfoCache(t, NewInMemoryLayerInfoCache()) +} diff --git a/docs/storage/cache/redis.go b/docs/storage/cache/redis.go new file mode 100644 index 00000000..6b8f7679 --- /dev/null +++ b/docs/storage/cache/redis.go @@ -0,0 +1,98 @@ +package cache + +import ( + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/garyburd/redigo/redis" + "golang.org/x/net/context" +) + +// redisLayerInfoCache provides an implementation of storage.LayerInfoCache +// based on redis. Layer info is stored in two parts. The first provide fast +// access to repository membership through a redis set for each repo. The +// second is a redis hash keyed by the digest of the layer, providing path and +// length information. Note that there is no implied relationship between +// these two caches. 
The layer may exist in one, both or none and the code +// must be written this way. +type redisLayerInfoCache struct { + pool *redis.Pool + + // TODO(stevvooe): We use a pool because we don't have great control over + // the cache lifecycle to manage connections. A new connection if fetched + // for each operation. Once we have better lifecycle management of the + // request objects, we can change this to a connection. +} + +// NewRedisLayerInfoCache returns a new redis-based LayerInfoCache using the +// provided redis connection pool. +func NewRedisLayerInfoCache(pool *redis.Pool) LayerInfoCache { + return &base{&redisLayerInfoCache{ + pool: pool, + }} +} + +// Contains does a membership check on the repository blob set in redis. This +// is used as an access check before looking up global path information. If +// false is returned, the caller should still check the backend to if it +// exists elsewhere. +func (rlic *redisLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { + conn := rlic.pool.Get() + defer conn.Close() + + ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Contains(%q, %q)", repo, dgst) + return redis.Bool(conn.Do("SISMEMBER", rlic.repositoryBlobSetKey(repo), dgst)) +} + +// Add adds the layer to the redis repository blob set. +func (rlic *redisLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { + conn := rlic.pool.Get() + defer conn.Close() + + ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Add(%q, %q)", repo, dgst) + _, err := conn.Do("SADD", rlic.repositoryBlobSetKey(repo), dgst) + return err +} + +// Meta retrieves the layer meta data from the redis hash, returning +// ErrUnknownLayer if not found. +func (rlic *redisLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { + conn := rlic.pool.Get() + defer conn.Close() + + reply, err := redis.Values(conn.Do("HMGET", rlic.blobMetaHashKey(dgst), "path", "length")) + if err != nil { + return LayerMeta{}, err + } + + if len(reply) < 2 || reply[0] == nil || reply[1] == nil { + return LayerMeta{}, ErrNotFound + } + + var meta LayerMeta + if _, err := redis.Scan(reply, &meta.Path, &meta.Length); err != nil { + return LayerMeta{}, err + } + + return meta, nil +} + +// SetMeta sets the meta data for the given digest using a redis hash. A hash +// is used here since we may store unrelated fields about a layer in the +// future. +func (rlic *redisLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { + conn := rlic.pool.Get() + defer conn.Close() + + _, err := conn.Do("HMSET", rlic.blobMetaHashKey(dgst), "path", meta.Path, "length", meta.Length) + return err +} + +// repositoryBlobSetKey returns the key for the blob set in the cache. +func (rlic *redisLayerInfoCache) repositoryBlobSetKey(repo string) string { + return "repository::" + repo + "::blobs" +} + +// blobMetaHashKey returns the cache key for immutable blob meta data. 
+func (rlic *redisLayerInfoCache) blobMetaHashKey(dgst digest.Digest) string { + return "blobs::" + dgst.String() +} diff --git a/docs/storage/cache/redis_test.go b/docs/storage/cache/redis_test.go new file mode 100644 index 00000000..7422a7eb --- /dev/null +++ b/docs/storage/cache/redis_test.go @@ -0,0 +1,50 @@ +package cache + +import ( + "flag" + "os" + "testing" + "time" + + "github.com/garyburd/redigo/redis" +) + +var redisAddr string + +func init() { + flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") +} + +// TestRedisLayerInfoCache exercises a live redis instance using the cache +// implementation. +func TestRedisLayerInfoCache(t *testing.T) { + if redisAddr == "" { + // fallback to an environement variable + redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") + } + + if redisAddr == "" { + // skip if still not set + t.Skip("please set -registry.storage.cache.redis to test layer info cache against redis") + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + return redis.Dial("tcp", redisAddr) + }, + MaxIdle: 1, + MaxActive: 2, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + Wait: false, // if a connection is not avialable, proceed without cache. + } + + // Clear the database + if _, err := pool.Get().Do("FLUSHDB"); err != nil { + t.Fatalf("unexpected error flushing redis db: %v", err) + } + + checkLayerInfoCache(t, NewRedisLayerInfoCache(pool)) +} From 6ab228f79828dda905a33952c3a5f1554ee0deb5 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 1 Apr 2015 16:41:33 -0700 Subject: [PATCH 065/501] Integrate layer info cache with registry and storage This changeset integrates the layer info cache with the registry webapp and storage backend. The main benefit is to cache immutable layer meta data, reducing backend roundtrips. The cache can be configured to use either redis or an inmemory cache. This provides massive performance benefits for HEAD http checks on layer blobs and manifest verification. Signed-off-by: Stephen J Day --- docs/doc.go | 2 +- docs/handlers/app.go | 9 +- docs/handlers/app_test.go | 3 +- docs/storage/blobstore.go | 5 +- docs/storage/filereader.go | 4 +- docs/storage/layer_test.go | 9 +- docs/storage/layercache.go | 183 +++++++++++++++++++++++++++++ docs/storage/layerreader.go | 15 +++ docs/storage/manifeststore_test.go | 4 +- docs/storage/registry.go | 46 ++++++-- 10 files changed, 256 insertions(+), 24 deletions(-) create mode 100644 docs/storage/layercache.go diff --git a/docs/doc.go b/docs/doc.go index 5049dae3..1c01e42e 100644 --- a/docs/doc.go +++ b/docs/doc.go @@ -1,3 +1,3 @@ // Package registry is a placeholder package for registry interface -// destinations and utilities. +// definitions and utilities. 
package registry diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f837e861..e333d6d9 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -18,6 +18,7 @@ import ( registrymiddleware "github.com/docker/distribution/registry/middleware/registry" repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" @@ -102,7 +103,13 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.configureEvents(&configuration) app.configureRedis(&configuration) - app.registry = storage.NewRegistryWithDriver(app.driver) + if app.redis != nil { + app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) + } else { + // always fall back to inmemory storage + app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache()) + } + app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) if err != nil { panic(err) diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index cd515dd0..d0b9174d 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -13,6 +13,7 @@ import ( "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/driver/inmemory" "golang.org/x/net/context" ) @@ -28,7 +29,7 @@ func TestAppDispatcher(t *testing.T) { Context: context.Background(), router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(driver), + registry: storage.NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()), } server := httptest.NewServer(app) router := v2.Router() diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index 975df19f..8bab2f5e 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -18,8 +18,9 @@ import ( // abstraction, providing utility methods that support creating and traversing // backend links. type blobStore struct { - *registry - ctx context.Context + driver storagedriver.StorageDriver + pm *pathMapper + ctx context.Context } // exists reports whether or not the path exists. If the driver returns error diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go index b70b1fb2..65d4347f 100644 --- a/docs/storage/filereader.go +++ b/docs/storage/filereader.go @@ -27,8 +27,8 @@ type fileReader struct { // identifying fields path string - size int64 // size is the total layer size, must be set. - modtime time.Time + size int64 // size is the total size, must be set. + modtime time.Time // TODO(stevvooe): This is not needed anymore. 
// mutable fields rc io.ReadCloser // remote read closer diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go index 43e028d5..e225d068 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/layer_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -35,7 +36,7 @@ func TestSimpleLayerUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -143,7 +144,7 @@ func TestSimpleLayerRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -180,7 +181,7 @@ func TestSimpleLayerRead(t *testing.T) { t.Fatalf("unexpected error fetching non-existent layer: %v", err) } - randomLayerDigest, err := writeTestLayer(driver, ls.(*layerStore).repository.pm, imageName, dgst, randomLayerReader) + randomLayerDigest, err := writeTestLayer(driver, defaultPathMapper, imageName, dgst, randomLayerReader) if err != nil { t.Fatalf("unexpected error writing test layer: %v", err) } @@ -252,7 +253,7 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/layercache.go b/docs/storage/layercache.go new file mode 100644 index 00000000..c7ee9b27 --- /dev/null +++ b/docs/storage/layercache.go @@ -0,0 +1,183 @@ +package storage + +import ( + "expvar" + "sync/atomic" + "time" + + "github.com/docker/distribution" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/driver" + "golang.org/x/net/context" +) + +// cachedLayerService implements the layer service with path-aware caching, +// using a LayerInfoCache interface. +type cachedLayerService struct { + distribution.LayerService // upstream layer service + repository distribution.Repository + ctx context.Context + driver driver.StorageDriver + *blobStore // global blob store + cache cache.LayerInfoCache +} + +// Exists checks for existence of the digest in the cache, immediately +// returning if it exists for the repository. If not, the upstream is checked. +// When a positive result is found, it is written into the cache. 
+func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) { + ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst) + now := time.Now() + defer func() { + // TODO(stevvooe): Replace this with a decent context-based metrics solution + ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)). + Infof("(*cachedLayerService).Exists(%q)", dgst) + }() + + atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1) + available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) + if err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) + goto fallback + } + + if available { + atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1) + return true, nil + } + +fallback: + atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1) + exists, err := lc.LayerService.Exists(dgst) + if err != nil { + return exists, err + } + + if exists { + // we can only cache this if the existence is positive. + if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err) + } + } + + return exists, err +} + +// Fetch checks for the availability of the layer in the repository via the +// cache. If present, the metadata is resolved and the layer is returned. If +// any operation fails, the layer is read directly from the upstream. The +// results are cached, if possible. +func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) { + ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Fetch(%q)", dgst) + now := time.Now() + defer func() { + ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)). + Infof("(*layerInfoCache).Fetch(%q)", dgst) + }() + + atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1) + available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) + if err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) + goto fallback + } + + if available { + // fast path: get the layer info and return + meta, err := lc.cache.Meta(lc.ctx, dgst) + if err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err) + goto fallback + } + + atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1) + return newLayerReader(lc.driver, dgst, meta.Path, meta.Length) + } + + // NOTE(stevvooe): Unfortunately, the cache here only makes checks for + // existing layers faster. We'd have to provide more careful + // synchronization with the backend to make the missing case as fast. + +fallback: + atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1) + layer, err := lc.LayerService.Fetch(dgst) + if err != nil { + return nil, err + } + + // add the layer to the repository + if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { + ctxu.GetLogger(lc.ctx). + Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err) + } + + // lookup layer path and add it to the cache, if it succeds. Note that we + // still return the layer even if we have trouble caching it. + if path, err := lc.resolveLayerPath(layer); err != nil { + ctxu.GetLogger(lc.ctx). + Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err) + } else { + // add the layer to the cache once we've resolved the path. 
+ if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil { + ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err) + } + } + + return layer, err +} + +// extractLayerInfo pulls the layerInfo from the layer, attempting to get the +// path information from either the concrete object or by resolving the +// primary blob store path. +func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) { + // try and resolve the type and driver, so we don't have to traverse links + switch v := layer.(type) { + case *layerReader: + // only set path if we have same driver instance. + if v.driver == lc.driver { + return v.path, nil + } + } + + ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest()) + // we have to do an expensive stat to resolve the layer location but no + // need to check the link, since we already have layer instance for this + // repository. + bp, err := lc.blobStore.path(layer.Digest()) + if err != nil { + return "", err + } + + return bp, nil +} + +// layerInfoCacheMetrics keeps track of cache metrics for layer info cache +// requests. Note this is kept globally and made available via expvar. For +// more detailed metrics, its recommend to instrument a particular cache +// implementation. +var layerInfoCacheMetrics struct { + // Exists tracks calls to the Exists caches. + Exists struct { + Requests uint64 + Hits uint64 + Misses uint64 + } + + // Fetch tracks calls to the fetch caches. + Fetch struct { + Requests uint64 + Hits uint64 + Misses uint64 + } +} + +func init() { + expvar.Publish("layerinfocache", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. + return layerInfoCacheMetrics + })) +} diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index 414951d9..40deba6a 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -17,6 +17,21 @@ type layerReader struct { digest digest.Digest } +// newLayerReader returns a new layerReader with the digest, path and length, +// eliding round trips to the storage backend. 
+func newLayerReader(driver driver.StorageDriver, dgst digest.Digest, path string, length int64) (*layerReader, error) { + fr := &fileReader{ + driver: driver, + path: path, + size: length, + } + + return &layerReader{ + fileReader: *fr, + digest: dgst, + }, nil +} + var _ distribution.Layer = &layerReader{} func (lr *layerReader) Digest() digest.Digest { diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index dc03dced..fe75868b 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -6,6 +6,8 @@ import ( "reflect" "testing" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" @@ -28,7 +30,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(driver) + registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) repo, err := registry.Repository(ctx, name) if err != nil { diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 8d7ea16e..9ad43acb 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -3,6 +3,7 @@ package storage import ( "github.com/docker/distribution" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" "golang.org/x/net/context" ) @@ -10,28 +11,29 @@ import ( // registry is the top-level implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { - driver storagedriver.StorageDriver - pm *pathMapper - blobStore *blobStore + driver storagedriver.StorageDriver + pm *pathMapper + blobStore *blobStore + layerInfoCache cache.LayerInfoCache } // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate. -func NewRegistryWithDriver(driver storagedriver.StorageDriver) distribution.Registry { - bs := &blobStore{} +func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Registry { + bs := &blobStore{ + driver: driver, + pm: defaultPathMapper, + } - reg := ®istry{ + return ®istry{ driver: driver, blobStore: bs, // TODO(sday): This should be configurable. - pm: defaultPathMapper, + pm: defaultPathMapper, + layerInfoCache: layerInfoCache, } - - reg.blobStore.registry = reg - - return reg } // Repository returns an instance of the repository tied to the registry. @@ -83,9 +85,29 @@ func (repo *repository) Manifests() distribution.ManifestService { // may be context sensitive in the future. The instance should be used similar // to a request local. func (repo *repository) Layers() distribution.LayerService { - return &layerStore{ + ls := &layerStore{ repository: repo, } + + if repo.registry.layerInfoCache != nil { + // TODO(stevvooe): This is not the best place to setup a cache. We would + // really like to decouple the cache from the backend but also have the + // manifeset service use the layer service cache. For now, we can simply + // integrate the cache directly. The main issue is that we have layer + // access and layer data coupled in a single object. Work is already under + // way to decouple this. 
+
+		return &cachedLayerService{
+			LayerService: ls,
+			repository: repo,
+			ctx: repo.ctx,
+			driver: repo.driver,
+			blobStore: repo.blobStore,
+			cache: repo.registry.layerInfoCache,
+		}
+	}
+
+	return ls
 }

 func (repo *repository) Signatures() distribution.SignatureService {

From 4e1ecad6cc31a080b0c0044abf99c55d2338e3bf Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 2 Apr 2015 16:38:01 -0700
Subject: [PATCH 066/501] Allow control over which storage cache to use

This allows one to better control the usage of the cache and turn it off
completely. The storage configuration module was modified to allow parameters
to be passed to just the storage implementation, rather than to the driver.

Signed-off-by: Stephen J Day
---
 docs/handlers/app.go | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index e333d6d9..0863732c 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -103,11 +103,28 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App
 	app.configureEvents(&configuration)
 	app.configureRedis(&configuration)

-	if app.redis != nil {
-		app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis))
-	} else {
-		// always fall back to inmemory storage
-		app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache())
+	// configure storage caches
+	if cc, ok := configuration.Storage["cache"]; ok {
+		switch cc["layerinfo"] {
+		case "redis":
+			if app.redis == nil {
+				panic("redis configuration required to use for layerinfo cache")
+			}
+			app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis))
+			ctxu.GetLogger(app).Infof("using redis layerinfo cache")
+		case "inmemory":
+			app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache())
+			ctxu.GetLogger(app).Infof("using inmemory layerinfo cache")
+		default:
+			if cc["layerinfo"] != "" {
+				ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"])
+			}
+		}
+	}
+
+	if app.registry == nil {
+		// configure the registry if no cache section is available.
+		app.registry = storage.NewRegistryWithDriver(app.driver, nil)
 	}

 	app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"])

From 6b748a74ef9cb9677e3bda151cf2111b70375d2c Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 2 Apr 2015 21:22:11 -0700
Subject: [PATCH 067/501] Move expvar under the registry section

For consistency with other systems, the redis and caching monitoring data has
been moved under the "registry" section in expvar. This ensures the entire
registry state is kept to a single section.
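The get-or-create idiom that keeps all of this state under one section
looks roughly like the following condensed sketch (the payload is a
placeholder; the real values are wired up in the diff below):

    package main

    import "expvar"

    func main() {
        // Fetch or create the shared top-level "registry" map. NewMap
        // also publishes the map, so this must happen only once per name.
        registry := expvar.Get("registry")
        if registry == nil {
            registry = expvar.NewMap("registry")
        }

        // Subsystem data hangs off the shared map rather than claiming
        // new top-level expvar names.
        registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} {
            return map[string]interface{}{"Active": 0} // placeholder payload
        }))
    }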
Signed-off-by: Stephen J Day --- docs/handlers/app.go | 9 +++++++-- docs/storage/layercache.go | 21 ++++++++++++++++++++- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 0863732c..fac93382 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -272,13 +272,18 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { app.redis = pool - expvar.Publish("redis", expvar.Func(func() interface{} { + // setup expvar + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { return map[string]interface{}{ "Config": configuration.Redis, "Active": app.redis.ActiveCount(), } })) - } func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { diff --git a/docs/storage/layercache.go b/docs/storage/layercache.go index c7ee9b27..b9732f20 100644 --- a/docs/storage/layercache.go +++ b/docs/storage/layercache.go @@ -174,7 +174,26 @@ var layerInfoCacheMetrics struct { } func init() { - expvar.Publish("layerinfocache", expvar.Func(func() interface{} { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} { // no need for synchronous access: the increments are atomic and // during reading, we don't care if the data is up to date. The // numbers will always *eventually* be reported correctly. From def60f3426b25c73d99f2ce2a449de7c9043e4b7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 6 Apr 2015 19:10:50 -0700 Subject: [PATCH 068/501] Parallelize signature fetch in signature store To avoid compounded round trips leading to slow retrieval of manifests with a large number of signatures, the fetch of signatures has been parallelized. This simply spawns a goroutine for each path, coordinated with a sync.WaitGroup. Signed-off-by: Stephen J Day --- docs/storage/signaturestore.go | 42 +++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index abc52ca6..33912e8e 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -2,8 +2,10 @@ package storage import ( "path" + "sync" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) @@ -33,18 +35,42 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { return nil, err } - var signatures [][]byte - for _, sigPath := range signaturePaths { + var wg sync.WaitGroup + signatures := make([][]byte, len(signaturePaths)) // make space for everything + errCh := make(chan error, 1) // buffered chan so one proceeds + for i, sigPath := range signaturePaths { // Append the link portion sigPath = path.Join(sigPath, "link") - // TODO(stevvooe): These fetches should be parallelized for performance. - p, err := s.blobStore.linked(sigPath) - if err != nil { - return nil, err - } + wg.Add(1) + go func(idx int, sigPath string) { + defer wg.Done() + context.GetLogger(s.ctx). 
+ Debugf("fetching signature from %q", sigPath) + p, err := s.blobStore.linked(sigPath) + if err != nil { + context.GetLogger(s.ctx). + Errorf("error fetching signature from %q: %v", sigPath, err) - signatures = append(signatures, p) + // try to send an error, if it hasn't already been sent. + select { + case errCh <- err: + default: + } + + return + } + signatures[idx] = p + }(i, sigPath) + } + wg.Wait() + + select { + case err := <-errCh: + // just return the first error, similar to single threaded code. + return nil, err + default: + // pass } return signatures, nil From 8c0859e39cc36530a91ed67de1d5573528bf09e6 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 6 Apr 2015 16:23:31 -0700 Subject: [PATCH 069/501] Handle cloudFront bucket prefix issue Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- .../middleware/cloudfront/middleware.go | 24 +++++++++---------- docs/storage/driver/s3/s3.go | 5 ++++ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go index 2d155312..aee068a5 100644 --- a/docs/storage/driver/middleware/cloudfront/middleware.go +++ b/docs/storage/driver/middleware/cloudfront/middleware.go @@ -8,10 +8,10 @@ import ( "encoding/pem" "fmt" "io/ioutil" - "net/url" "time" "github.com/AdRoll/goamz/cloudfront" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) @@ -90,23 +90,23 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil } +// S3BucketKeyer is any type that is capable of returning the S3 bucket key +// which should be cached by AWS CloudFront. +type S3BucketKeyer interface { + S3BucketKey(path string) string +} + // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]interface{}) (string, error) { // TODO(endophage): currently only supports S3 - options["expiry"] = time.Now().Add(lh.duration) - - layerURLStr, err := lh.StorageDriver.URLFor(path, options) - if err != nil { - return "", err + keyer, ok := lh.StorageDriver.(S3BucketKeyer) + if !ok { + context.GetLogger(context.Background()).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(path, options) } - layerURL, err := url.Parse(layerURLStr) - if err != nil { - return "", err - } - - cfURL, err := lh.cloudfront.CannedSignedURL(layerURL.Path, "", time.Now().Add(lh.duration)) + cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) if err != nil { return "", err } diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index d240c901..402f2eaa 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -695,6 +695,11 @@ func (d *driver) s3Path(path string) string { return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") } +// S3BucketKey returns the s3 bucket key for the given storage driver path. 
+func (d *Driver) S3BucketKey(path string) string {
+	return d.StorageDriver.(*driver).s3Path(path)
+}
+
 func parseError(path string, err error) error {
 	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
 		return storagedriver.PathNotFoundError{Path: path}

From 2b4ad94ceec4b659b16ce9b3c17ca69e506bbc6f Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 7 Apr 2015 14:14:45 -0700
Subject: [PATCH 070/501] Defer case-sensitive support to storage backend

Rather than enforce lowercase paths for all drivers, support for
case-sensitivity has been deferred to the driver. There are a few caveats to
this approach:

1. There are possible security implications for tags that only differ in
their case. For instance, a tag "A" may be equivalent to tag "a" on certain
file system backends.
2. System paths should avoid case-sensitive identifiers where possible. This
might be problematic in a blob store that uses case-sensitive ids. For now,
since digest hex ids are all case-insensitive, this will not be an issue.

The recommended workaround is to not run the registry on a case-insensitive
filesystem driver in security-sensitive applications.

Signed-off-by: Stephen J Day
---
 docs/storage/driver/storagedriver.go         | 2 +-
 docs/storage/driver/testsuites/testsuites.go | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go
index f0fe7fef..442dc257 100644
--- a/docs/storage/driver/storagedriver.go
+++ b/docs/storage/driver/storagedriver.go
@@ -83,7 +83,7 @@ type StorageDriver interface {
 // number of path components separated by slashes, where each component is
 // restricted to lowercase alphanumeric characters or a period, underscore, or
 // hyphen.
-var PathRegexp = regexp.MustCompile(`^(/[a-z0-9._-]+)+$`)
+var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)

 // ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method.
 var ErrUnsupportedMethod = errors.New("unsupported method")
diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
index 18fd9840..74ddab6f 100644
--- a/docs/storage/driver/testsuites/testsuites.go
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -136,7 +136,9 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) {
 		"/.abc",
 		"/a--b",
 		"/a-.b",
-		"/_.abc"}
+		"/_.abc",
+		"/Docker/docker-registry",
+		"/Abc/Cba"}

 	for _, filename := range validFiles {
 		err := suite.StorageDriver.PutContent(filename, contents)
@@ -159,8 +161,7 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
 		"abc",
 		"123.abc",
 		"//bcd",
-		"/abc_123/",
-		"/Docker/docker-registry"}
+		"/abc_123/"}

 	for _, filename := range invalidFiles {
 		err := suite.StorageDriver.PutContent(filename, contents)

From 250e61e2a13b4b82fcc01b40d5853a32aa91c8f9 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 9 Apr 2015 14:08:24 -0700
Subject: [PATCH 071/501] Prevent false sharing in signature fetch

The original implementation wrote to different locations in a shared slice.
While this is theoretically okay, we end up thrashing the cpu cache since
multiple slice members may be on the same cache line. So, even though each
thread has its own memory location, there may be contention over the cache
line. This changes the code to aggregate to a slice in a single goroutine.

In reality, this change likely won't have any performance impact. The theory
proposed above hasn't really even been tested.
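In outline, the adopted shape has each worker send an indexed result
over a channel while a single goroutine owns the result slice. A
simplified sketch, with error propagation elided (the change below
keeps it):

    package main

    import (
        "fmt"
        "sync"
    )

    // fetchAll fans out one goroutine per path but funnels results
    // through a channel so that only this function writes to the slice.
    func fetchAll(paths []string, fetch func(string) []byte) [][]byte {
        type result struct {
            index int
            data  []byte
        }

        ch := make(chan result)
        var wg sync.WaitGroup
        for i, p := range paths {
            wg.Add(1)
            go func(i int, p string) {
                defer wg.Done()
                ch <- result{index: i, data: fetch(p)}
            }(i, p)
        }

        // Close the channel once every worker has sent its result.
        go func() {
            wg.Wait()
            close(ch)
        }()

        // Single writer: no two goroutines store to adjacent slice
        // elements, so writers never contend on a shared cache line.
        out := make([][]byte, len(paths))
        for r := range ch {
            out[r.index] = r.data
        }
        return out
    }

    func main() {
        data := fetchAll([]string{"a", "b"}, func(p string) []byte {
            return []byte(p) // placeholder fetch
        })
        fmt.Println(len(data))
    }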
Either way, we can consider it and possibly go forward.

Signed-off-by: Stephen J Day
---
 docs/storage/signaturestore.go | 56 +++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 21 deletions(-)

diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go
index 33912e8e..7094b69e 100644
--- a/docs/storage/signaturestore.go
+++ b/docs/storage/signaturestore.go
@@ -36,8 +36,13 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
 	}

 	var wg sync.WaitGroup
-	signatures := make([][]byte, len(signaturePaths)) // make space for everything
-	errCh := make(chan error, 1) // buffered chan so one proceeds
+	type result struct {
+		index int
+		signature []byte
+		err error
+	}
+	ch := make(chan result)
+
 	for i, sigPath := range signaturePaths {
 		// Append the link portion
 		sigPath = path.Join(sigPath, "link")
@@ -47,33 +52,42 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
 			defer wg.Done()
 			context.GetLogger(s.ctx).
 				Debugf("fetching signature from %q", sigPath)
-			p, err := s.blobStore.linked(sigPath)
-			if err != nil {
+
+			r := result{index: idx}
+			if p, err := s.blobStore.linked(sigPath); err != nil {
 				context.GetLogger(s.ctx).
 					Errorf("error fetching signature from %q: %v", sigPath, err)
-
-				// try to send an error, if it hasn't already been sent.
-				select {
-				case errCh <- err:
-				default:
-				}
-
-				return
+				r.err = err
+			} else {
+				r.signature = p
 			}
-			signatures[idx] = p
+
+			ch <- r
 		}(i, sigPath)
 	}
-	wg.Wait()
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()

-	select {
-	case err := <-errCh:
-		// just return the first error, similar to single threaded code.
-		return nil, err
-	default:
-		// pass
+	// aggregate the results
+	signatures := make([][]byte, len(signaturePaths))
+loop:
+	for {
+		select {
+		case result := <-ch:
+			signatures[result.index] = result.signature
+			if result.err != nil && err == nil {
+				// only set the first one.
+				err = result.err
+			}
+		case <-done:
+			break loop
+		}
 	}

-	return signatures, nil
+	return signatures, err
 }

 func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {

From 36a076995bcb190b854c8d40fd00b1f3dfb9ebc7 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 9 Apr 2015 18:45:39 -0700
Subject: [PATCH 072/501] Disassociate instance id from application

This moves the instance id out of the app so that it is associated with an
instantiation of the runtime. The instance id is stored on the background
context. This allows contexts using the main background context to include
an instance id for log messages. It also simplifies the application slightly.

Signed-off-by: Stephen J Day
---
 docs/handlers/app.go | 28 ++++++----------------------
 1 file changed, 6 insertions(+), 22 deletions(-)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index fac93382..059af260 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -8,7 +8,6 @@ import (
 	"os"
 	"time"

-	"code.google.com/p/go-uuid/uuid"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/configuration"
 	ctxu "github.com/docker/distribution/context"
@@ -32,11 +31,8 @@ import (
 // fields should be protected.
 type App struct {
 	context.Context
-	Config configuration.Configuration

-	// InstanceID is a unique id assigned to the application on each creation.
-	// Provides information in the logs and context to identify restarts.
-	InstanceID string
+	Config configuration.Configuration

 	router *mux.Router // main application router, configured with dispatchers
 	driver storagedriver.StorageDriver // driver maintains the app global storage driver instance.
@@ -52,29 +48,17 @@ type App struct {
 	redis *redis.Pool
 }

-// Value intercepts calls context.Context.Value, returning the current app id,
-// if requested.
-func (app *App) Value(key interface{}) interface{} {
-	switch key {
-	case "app.id":
-		return app.InstanceID
-	}
-
-	return app.Context.Value(key)
-}
-
 // NewApp takes a configuration and returns a configured app, ready to serve
 // requests. The app only implements ServeHTTP and can be wrapped in other
 // handlers accordingly.
 func NewApp(ctx context.Context, configuration configuration.Configuration) *App {
 	app := &App{
-		Config: configuration,
-		Context: ctx,
-		InstanceID: uuid.New(),
-		router: v2.RouterWithPrefix(configuration.HTTP.Prefix),
+		Config: configuration,
+		Context: ctx,
+		router: v2.RouterWithPrefix(configuration.HTTP.Prefix),
 	}

-	app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "app.id"))
+	app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id"))

 	// Register the handler dispatchers.
 	app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
@@ -200,7 +184,7 @@ func (app *App) configureEvents(configuration *configuration.Configuration) {

 	app.events.source = notifications.SourceRecord{
 		Addr: hostname,
-		InstanceID: app.InstanceID,
+		InstanceID: ctxu.GetStringValue(app, "instance.id"),
 	}
 }

From e83e37618f4b977c28aae7b56c3212f8d4c0005f Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Thu, 9 Apr 2015 19:21:33 -0700
Subject: [PATCH 073/501] Rename top level registry interface to namespace

Registry is intended to be used more as a repository service than an abstract
collection of repositories. Namespace better describes a collection of
repositories retrievable by name.

The registry service serves any repository in the global scope.

Signed-off-by: Derek McGowan (github: dmcgowan)
---
 docs/handlers/app.go                   | 4 ++--
 docs/middleware/registry/middleware.go | 4 ++--
 docs/storage/manifeststore_test.go     | 2 +-
 docs/storage/registry.go               | 8 +++++++-
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index fac93382..657ed2db 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -40,7 +40,7 @@ type App struct {

 	router *mux.Router // main application router, configured with dispatchers
 	driver storagedriver.StorageDriver // driver maintains the app global storage driver instance.
-	registry distribution.Registry // registry is the primary registry backend for the app instance.
+	registry distribution.Namespace // registry is the primary registry backend for the app instance.
 	accessController auth.AccessController // main access controller for application

 	// events contains notification related configuration.
@@ -541,7 +541,7 @@ func appendAccessRecords(records []auth.Access, method string, repo string) []au } // applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Registry, middlewares []configuration.Middleware) (distribution.Registry, error) { +func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { for _, mw := range middlewares { rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) if err != nil { diff --git a/docs/middleware/registry/middleware.go b/docs/middleware/registry/middleware.go index d3e88810..048603b8 100644 --- a/docs/middleware/registry/middleware.go +++ b/docs/middleware/registry/middleware.go @@ -8,7 +8,7 @@ import ( // InitFunc is the type of a RegistryMiddleware factory function and is // used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(registry distribution.Registry, options map[string]interface{}) (distribution.Registry, error) +type InitFunc func(registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) var middlewares map[string]InitFunc @@ -28,7 +28,7 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a RegistryMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, registry distribution.Registry) (distribution.Registry, error) { +func Get(name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { return initFunc(registry, options) diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index fe75868b..a70789d3 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -21,7 +21,7 @@ import ( type manifestStoreTestEnv struct { ctx context.Context driver driver.StorageDriver - registry distribution.Registry + registry distribution.Namespace repository distribution.Repository name string tag string diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 9ad43acb..1126db45 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -20,7 +20,7 @@ type registry struct { // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate. -func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Registry { +func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { bs := &blobStore{ driver: driver, pm: defaultPathMapper, @@ -36,6 +36,12 @@ func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache ca } } +// Scope returns the namespace scope for a registry. The registry +// will only serve repositories contained within this scope. +func (reg *registry) Scope() distribution.Scope { + return distribution.GlobalScope +} + // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. 
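For illustration, resolving a repository through the renamed interface
looks like the following sketch, mirroring the test setup used
elsewhere in this series (the repository name and the inmemory backend
are illustrative):

    package main

    import (
        "github.com/docker/distribution"
        "github.com/docker/distribution/registry/storage"
        "github.com/docker/distribution/registry/storage/cache"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
        "golang.org/x/net/context"
    )

    func main() {
        ctx := context.Background()

        // The top-level object is now a Namespace: a collection of
        // repositories addressable by name.
        var ns distribution.Namespace = storage.NewRegistryWithDriver(
            inmemory.New(), cache.NewInMemoryLayerInfoCache())

        repo, err := ns.Repository(ctx, "foo/bar")
        if err != nil {
            panic(err)
        }

        // Repository-scoped services hang off the repository instance.
        _ = repo.Layers()
    }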
From 4ac515fde468f8d0845ae68fa4416ed4525bce90 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 10 Apr 2015 15:56:29 -0700 Subject: [PATCH 074/501] Prevent Close() from being called after Finish() --- docs/storage/layerwriter.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index ccd8679b..58078459 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -109,6 +109,10 @@ func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) { } func (lw *layerWriter) Close() error { + if lw.err != nil { + return lw.err + } + if err := lw.storeHashState(); err != nil { return err } From 12bf470b2f42c0b65d18a880ffc2f17498d9af4c Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 9 Apr 2015 18:50:57 -0700 Subject: [PATCH 075/501] Trace function calls to Base storage driver Signed-off-by: Stephen J Day --- docs/storage/driver/base/base.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index 0365ba3c..ba7a859d 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -40,6 +40,7 @@ package base import ( "io" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -51,6 +52,9 @@ type Base struct { // GetContent wraps GetContent of underlying storage driver. func (base *Base) GetContent(path string) ([]byte, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.GetContent") + if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } @@ -60,6 +64,9 @@ func (base *Base) GetContent(path string) ([]byte, error) { // PutContent wraps PutContent of underlying storage driver. func (base *Base) PutContent(path string, content []byte) error { + _, done := context.WithTrace(context.Background()) + defer done("Base.PutContent") + if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } @@ -69,6 +76,9 @@ func (base *Base) PutContent(path string, content []byte) error { // ReadStream wraps ReadStream of underlying storage driver. func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.ReadStream") + if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -82,6 +92,9 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream wraps WriteStream of underlying storage driver. func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.WriteStream") + if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -95,6 +108,9 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i // Stat wraps Stat of underlying storage driver. func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.Stat") + if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } @@ -104,6 +120,9 @@ func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { // List wraps List of underlying storage driver. 
func (base *Base) List(path string) ([]string, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.List") + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path} } @@ -113,6 +132,9 @@ func (base *Base) List(path string) ([]string, error) { // Move wraps Move of underlying storage driver. func (base *Base) Move(sourcePath string, destPath string) error { + _, done := context.WithTrace(context.Background()) + defer done("Base.Move") + if !storagedriver.PathRegexp.MatchString(sourcePath) { return storagedriver.InvalidPathError{Path: sourcePath} } else if !storagedriver.PathRegexp.MatchString(destPath) { @@ -124,6 +146,9 @@ func (base *Base) Move(sourcePath string, destPath string) error { // Delete wraps Delete of underlying storage driver. func (base *Base) Delete(path string) error { + _, done := context.WithTrace(context.Background()) + defer done("Base.Move") + if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } @@ -133,6 +158,9 @@ func (base *Base) Delete(path string) error { // URLFor wraps URLFor of underlying storage driver. func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { + _, done := context.WithTrace(context.Background()) + defer done("Base.URLFor") + if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path} } From 98985526561569713b14ce55ae370184b63fd01a Mon Sep 17 00:00:00 2001 From: Richard Date: Tue, 14 Apr 2015 16:07:23 -0700 Subject: [PATCH 076/501] Add auth.user.name to logging context --- docs/auth/silly/access.go | 2 +- docs/handlers/app.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go index 134b0ae5..39318d1a 100644 --- a/docs/auth/silly/access.go +++ b/docs/auth/silly/access.go @@ -66,7 +66,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, &challenge } - return context.WithValue(ctx, "auth.user", auth.UserInfo{Name: "silly"}), nil + return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil } type challenge struct { diff --git a/docs/handlers/app.go b/docs/handlers/app.go index c106df47..8188c9cf 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -324,6 +324,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return } + // Add username to request logging + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + if app.nameRequired(r) { repository, err := app.registry.Repository(context, getName(context)) @@ -456,7 +459,6 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // should be replaced by another, rather than replacing the context on a // mutable object. 
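(A note on the pattern in this hunk: the authenticated user travels as a context value and is then surfaced as a logging field on every subsequent log line. A rough stdlib-only sketch of that flow, with illustrative names standing in for the distribution context and auth packages:)

package main

import (
	"context"
	"fmt"
	"log"
)

// userKey is an unexported key type, the usual guard against context
// value collisions.
type userKey struct{}

// UserInfo mirrors the minimal shape passed to auth.WithUser above.
type UserInfo struct{ Name string }

func withUser(ctx context.Context, u UserInfo) context.Context {
	return context.WithValue(ctx, userKey{}, u)
}

// loggerFor plays the role of ctxu.GetLogger(ctx, "auth.user.name"): it
// pulls the value back out of the context and bakes it into the logger.
func loggerFor(ctx context.Context) *log.Logger {
	name := "anonymous"
	if u, ok := ctx.Value(userKey{}).(UserInfo); ok {
		name = u.Name
	}
	return log.New(log.Writer(), fmt.Sprintf("auth.user.name=%s ", name), log.LstdFlags)
}

func main() {
	ctx := withUser(context.Background(), UserInfo{Name: "silly"})
	loggerFor(ctx).Println("response completed")
}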
context.Context = ctx - return nil } From 16174241d1ab1dff5996973ae04f73790a33c4d3 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 15 Apr 2015 17:55:15 -0700 Subject: [PATCH 077/501] Update final upload chunk api doc Updates description about content length and location Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/api/v2/descriptors.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 73f8b463..833bff8b 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -1190,9 +1190,10 @@ var routeDescriptors = []RouteDescriptor{ StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { - Name: "Location", - Type: "url", - Format: "", + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", }, { Name: "Content-Range", @@ -1200,12 +1201,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "-", Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, + contentLengthZeroHeader, digestHeader, }, }, From 480d864fc417d083b6422b88929750291e72da14 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Thu, 16 Apr 2015 01:12:45 +0000 Subject: [PATCH 078/501] Use a build flag to disable resumable digests. Signed-off-by: Andy Goldstein --- docs/storage/layerstore.go | 9 ++-- docs/storage/layerwriter.go | 60 +++++++++++++++++------- docs/storage/layerwriter_nonresumable.go | 6 +++ docs/storage/layerwriter_resumable.go | 9 ++++ 4 files changed, 65 insertions(+), 19 deletions(-) create mode 100644 docs/storage/layerwriter_nonresumable.go create mode 100644 docs/storage/layerwriter_resumable.go diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index 77c235aa..1c7428a9 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -138,13 +138,16 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di return nil, err } - return &layerWriter{ + lw := &layerWriter{ layerStore: ls, uuid: uuid, startedAt: startedAt, - resumableDigester: digest.NewCanonicalResumableDigester(), bufferedFileWriter: *fw, - }, nil + } + + lw.setupResumableDigester() + + return lw, nil } func (ls *layerStore) path(dgst digest.Digest) (string, error) { diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index 58078459..1e5ea918 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -87,6 +87,10 @@ func (lw *layerWriter) Cancel() error { } func (lw *layerWriter) Write(p []byte) (int, error) { + if lw.resumableDigester == nil { + return lw.bufferedFileWriter.Write(p) + } + // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. @@ -98,6 +102,10 @@ func (lw *layerWriter) Write(p []byte) (int, error) { } func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) { + if lw.resumableDigester == nil { + return lw.bufferedFileWriter.ReadFrom(r) + } + // Ensure that the current write offset matches how many bytes have been // written to the digester. 
If not, we need to update the digest state to // match the current write position. @@ -113,8 +121,10 @@ func (lw *layerWriter) Close() error { return lw.err } - if err := lw.storeHashState(); err != nil { - return err + if lw.resumableDigester != nil { + if err := lw.storeHashState(); err != nil { + return err + } } return lw.bufferedFileWriter.Close() @@ -261,22 +271,37 @@ func (lw *layerWriter) storeHashState() error { // validateLayer checks the layer data against the digest, returning an error // if it does not match. The canonical digest is returned. func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { - // Restore the hasher state to the end of the upload. - if err := lw.resumeHashAt(lw.size); err != nil { - return "", err + var ( + verified, fullHash bool + canonical digest.Digest + ) + + if lw.resumableDigester != nil { + // Restore the hasher state to the end of the upload. + if err := lw.resumeHashAt(lw.size); err != nil { + return "", err + } + + canonical = lw.resumableDigester.Digest() + + if canonical.Algorithm() == dgst.Algorithm() { + // Common case: client and server prefer the same canonical digest + // algorithm - currently SHA256. + verified = dgst == canonical + } else { + // The client wants to use a different digest algorithm. They'll just + // have to be patient and wait for us to download and re-hash the + // uploaded content using that digest algorithm. + fullHash = true + } + } else { + // Not using resumable digests, so we need to hash the entire layer. + fullHash = true } - var verified bool - canonical := lw.resumableDigester.Digest() + if fullHash { + digester := digest.NewCanonicalDigester() - if canonical.Algorithm() == dgst.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = dgst == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. 
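(The verification code that follows reads more easily in isolation. A self-contained sketch of the same one-pass technique: stream the stored layer once through io.TeeReader so that the canonical digester and the verifier for the client-supplied digest both see every byte. The helper below uses bare stdlib hashes; the real code goes through the digest package, and the two algorithms may differ, which is the whole reason for the tee.)

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// rehashAndVerify reads content exactly once, computing the server's
// canonical digest and checking the client-provided digest in the same pass.
func rehashAndVerify(content io.Reader, clientDigest string) (string, bool, error) {
	canonicalHash := sha256.New()
	verifyHash := sha256.New() // stands in for the client's chosen algorithm

	// Every byte read through tr is also written to canonicalHash.
	tr := io.TeeReader(content, canonicalHash)
	if _, err := io.Copy(verifyHash, tr); err != nil {
		return "", false, err
	}

	canonical := "sha256:" + hex.EncodeToString(canonicalHash.Sum(nil))
	verified := "sha256:"+hex.EncodeToString(verifyHash.Sum(nil)) == clientDigest
	return canonical, verified, nil
}

func main() {
	layer := "layer-bytes"
	sum := sha256.Sum256([]byte(layer))
	canonical, ok, err := rehashAndVerify(strings.NewReader(layer), "sha256:"+hex.EncodeToString(sum[:]))
	fmt.Println(canonical, ok, err) // ok == true
}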
digestVerifier, err := digest.NewDigestVerifier(dgst) if err != nil { return "", err @@ -288,10 +313,13 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) return "", err } - if _, err = io.Copy(digestVerifier, fr); err != nil { + tr := io.TeeReader(fr, digester) + + if _, err = io.Copy(digestVerifier, tr); err != nil { return "", err } + canonical = digester.Digest() verified = digestVerifier.Verified() } diff --git a/docs/storage/layerwriter_nonresumable.go b/docs/storage/layerwriter_nonresumable.go new file mode 100644 index 00000000..d4350c6b --- /dev/null +++ b/docs/storage/layerwriter_nonresumable.go @@ -0,0 +1,6 @@ +// +build noresumabledigest + +package storage + +func (lw *layerWriter) setupResumableDigester() { +} diff --git a/docs/storage/layerwriter_resumable.go b/docs/storage/layerwriter_resumable.go new file mode 100644 index 00000000..7d8c6335 --- /dev/null +++ b/docs/storage/layerwriter_resumable.go @@ -0,0 +1,9 @@ +// +build !noresumabledigest + +package storage + +import "github.com/docker/distribution/digest" + +func (lw *layerWriter) setupResumableDigester() { + lw.resumableDigester = digest.NewCanonicalResumableDigester() +} From 136f0ed8bb00d19ae67f54b4063a59096d927dea Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 15 Apr 2015 19:20:45 -0700 Subject: [PATCH 079/501] Instantiate http context before dispatch Ensure that the status is logged in the context by instantiating before the request is routed to handlers. While this requires some level of hacking to acheive, the result is that the context value of "http.request.status" is as accurate as possible for each request. Signed-off-by: Stephen J Day --- docs/handlers/app.go | 55 ++++++++++++++---------------------- docs/handlers/context.go | 60 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+), 34 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 8188c9cf..c2685d98 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -273,6 +273,21 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. + // Instantiate an http context here so we can track the error codes + // returned by the request router. + ctx := defaultContextManager.context(app, w, r) + defer func() { + ctxu.GetResponseLogger(ctx).Infof("response completed") + }() + defer defaultContextManager.release(ctx) + + // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. + var err error + w, err = ctxu.GetResponseWriter(ctx) + if err != nil { + ctxu.GetLogger(ctx).Warnf("response writer not found in context") + } + // Set a header with the Docker Distribution API Version for all responses. w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") app.router.ServeHTTP(w, r) @@ -287,38 +302,12 @@ type dispatchFunc func(ctx *Context, r *http.Request) http.Handler // TODO(stevvooe): dispatchers should probably have some validation error // chain with proper error reporting. -// singleStatusResponseWriter only allows the first status to be written to be -// the valid request status. The current use case of this class should be -// factored out. 
-type singleStatusResponseWriter struct { - http.ResponseWriter - status int -} - -func (ssrw *singleStatusResponseWriter) WriteHeader(status int) { - if ssrw.status != 0 { - return - } - ssrw.status = status - ssrw.ResponseWriter.WriteHeader(status) -} - -func (ssrw *singleStatusResponseWriter) Flush() { - if flusher, ok := ssrw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - // dispatcher returns a handler that constructs a request specific context and // handler, using the dispatch factory function. func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { context := app.context(w, r) - defer func() { - ctxu.GetResponseLogger(context).Infof("response completed") - }() - if err := app.authorized(w, r, context); err != nil { ctxu.GetLogger(context).Errorf("error authorizing context: %v", err) return @@ -360,16 +349,16 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } } - handler := dispatch(context, r) - - ssrw := &singleStatusResponseWriter{ResponseWriter: w} - handler.ServeHTTP(ssrw, r) + dispatch(context, r).ServeHTTP(w, r) // Automated error response handling here. Handlers may return their // own errors if they need different behavior (such as range errors // for layer upload). if context.Errors.Len() > 0 { - if ssrw.status == 0 { + if context.Value("http.response.status") == 0 { + // TODO(stevvooe): Getting this value from the context is a + // bit of a hack. We can further address with some of our + // future refactoring. w.WriteHeader(http.StatusBadRequest) } serveJSON(w, context.Errors) @@ -380,10 +369,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // context constructs the context object for the application. This only be // called once per request. func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := ctxu.WithRequest(app, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx := defaultContextManager.context(app, w, r) ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "vars.name", "vars.reference", diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 5496a794..0df55346 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -3,6 +3,7 @@ package handlers import ( "fmt" "net/http" + "sync" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" @@ -88,3 +89,62 @@ func getUserName(ctx context.Context, r *http.Request) string { return username } + +// contextManager allows us to associate net/context.Context instances with a +// request, based on the memory identity of http.Request. This prepares http- +// level context, which is not application specific. If this is called, +// (*contextManager).release must be called on the context when the request is +// completed. +// +// Providing this circumvents a lot of necessity for dispatchers with the +// benefit of instantiating the request context much earlier. +// +// TODO(stevvooe): Consider making this facility a part of the context package. +type contextManager struct { + contexts map[*http.Request]context.Context + mu sync.Mutex +} + +// defaultContextManager is just a global instance to register request contexts. 
+var defaultContextManager = newContextManager() + +func newContextManager() *contextManager { + return &contextManager{ + contexts: make(map[*http.Request]context.Context), + } +} + +// context either returns a new context or looks it up in the manager. +func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { + cm.mu.Lock() + defer cm.mu.Unlock() + + ctx, ok := cm.contexts[r] + if ok { + return ctx + } + + if parent == nil { + parent = ctxu.Background() + } + + ctx = ctxu.WithRequest(parent, r) + ctx, w = ctxu.WithResponseWriter(ctx, w) + ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + cm.contexts[r] = ctx + + return ctx +} + +// releases frees any associated with resources from request. +func (cm *contextManager) release(ctx context.Context) { + cm.mu.Lock() + defer cm.mu.Unlock() + + r, err := ctxu.GetRequest(ctx) + if err != nil { + ctxu.GetLogger(ctx).Errorf("no request found in context during release") + return + } + delete(cm.contexts, r) +} From 0b2feaf611de8ca1e4b8f382162806653cc99db8 Mon Sep 17 00:00:00 2001 From: Richard Date: Tue, 7 Apr 2015 15:52:48 -0700 Subject: [PATCH 080/501] Automatically purge old upload files. When the registry starts a background timer will periodically scan the upload directories on the file system every 24 hours and delete any files older than 1 week. An initial jitter intends to avoid contention on the filesystem where multiple registries with the same storage driver are started simultaneously. --- docs/handlers/app.go | 28 +++++ docs/storage/paths.go | 8 ++ docs/storage/purgeuploads.go | 136 ++++++++++++++++++++++++ docs/storage/purgeuploads_test.go | 165 ++++++++++++++++++++++++++++++ docs/storage/walk.go | 50 +++++++++ docs/storage/walk_test.go | 119 +++++++++++++++++++++ 6 files changed, 506 insertions(+) create mode 100644 docs/storage/purgeuploads.go create mode 100644 docs/storage/purgeuploads_test.go create mode 100644 docs/storage/walk.go create mode 100644 docs/storage/walk_test.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index c106df47..1b8e854c 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -3,6 +3,7 @@ package handlers import ( "expvar" "fmt" + "math/rand" "net" "net/http" "os" @@ -79,6 +80,9 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // a health check. 
panic(err) } + + startUploadPurger(app.driver, ctxu.GetLogger(app)) + app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) if err != nil { panic(err) @@ -560,3 +564,27 @@ func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []co } return driver, nil } + +// startUploadPurger schedules a goroutine which will periodically +// check upload directories for old files and delete them +func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute + + // Start with reasonable defaults + // TODO:(richardscothern) make configurable + purgeAge := time.Duration(7 * 24 * time.Hour) + timeBetweenPurges := time.Duration(1 * 24 * time.Hour) + + go func() { + log.Infof("Starting upload purge in %s", jitter) + time.Sleep(jitter) + + for { + storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true) + log.Infof("Starting upload purge in %s", timeBetweenPurges) + time.Sleep(timeBetweenPurges) + } + }() + +} diff --git a/docs/storage/paths.go b/docs/storage/paths.go index f541f079..7aeff6e4 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -257,6 +257,8 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { offset = "" // Limit to the prefix for listing offsets. } return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "hashstates", v.alg, offset)...), nil + case repositoriesRootPathSpec: + return path.Join(repoPrefix...), nil default: // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). return "", fmt.Errorf("unknown path spec: %#v", v) @@ -446,6 +448,12 @@ type uploadHashStatePathSpec struct { func (uploadHashStatePathSpec) pathSpec() {} +// repositoriesRootPathSpec returns the root of repositories +type repositoriesRootPathSpec struct { +} + +func (repositoriesRootPathSpec) pathSpec() {} + // digestPathComponents provides a consistent path breakdown for a given // digest. For a generic digest, it will be as follows: // diff --git a/docs/storage/purgeuploads.go b/docs/storage/purgeuploads.go new file mode 100644 index 00000000..13c468de --- /dev/null +++ b/docs/storage/purgeuploads.go @@ -0,0 +1,136 @@ +package storage + +import ( + "path" + "strings" + "time" + + "code.google.com/p/go-uuid/uuid" + log "github.com/Sirupsen/logrus" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// uploadData stored the location of temporary files created during a layer upload +// along with the date the upload was started +type uploadData struct { + containingDir string + startedAt time.Time +} + +func newUploadData() uploadData { + return uploadData{ + containingDir: "", + // default to far in future to protect against missing startedat + startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + } +} + +// PurgeUploads deletes files from the upload directory +// created before olderThan. The list of files deleted and errors +// encountered are returned +func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { + log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + uploadData, errors := getOutstandingUploads(driver) + var deleted []string + for _, uploadData := range uploadData { + if uploadData.startedAt.Before(olderThan) { + var err error + log.Infof("Upload files in %s have older date (%s) than purge date (%s). 
Removing upload directory.", + uploadData.containingDir, uploadData.startedAt, olderThan) + if actuallyDelete { + err = driver.Delete(uploadData.containingDir) + } + if err == nil { + deleted = append(deleted, uploadData.containingDir) + } else { + errors = append(errors, err) + } + } + } + + log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + return deleted, errors +} + +// getOutstandingUploads walks the upload directory, collecting files +// which could be eligible for deletion. The only reliable way to +// classify the age of a file is with the date stored in the startedAt +// file, so gather files by UUID with a date from startedAt. +func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploadData, []error) { + var errors []error + uploads := make(map[string]uploadData, 0) + + inUploadDir := false + root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + if err != nil { + return uploads, append(errors, err) + } + err = Walk(driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + if file[0] == '_' { + // Reserved directory + inUploadDir = (file == "_uploads") + + if fileInfo.IsDir() && !inUploadDir { + return ErrSkipDir + } + + } + + uuid, isContainingDir := uUIDFromPath(filePath) + if uuid == "" { + // Cannot reliably delete + return nil + } + ud, ok := uploads[uuid] + if !ok { + ud = newUploadData() + } + if isContainingDir { + ud.containingDir = filePath + } + if file == "startedat" { + if t, err := readStartedAtFile(driver, filePath); err == nil { + ud.startedAt = t + } else { + errors = pushError(errors, filePath, err) + } + + } + + uploads[uuid] = ud + return nil + }) + + if err != nil { + errors = pushError(errors, root, err) + } + return uploads, errors +} + +// uUIDFromPath extracts the upload UUID from a given path +// If the UUID is the last path component, this is the containing +// directory for all upload files +func uUIDFromPath(path string) (string, bool) { + components := strings.Split(path, "/") + for i := len(components) - 1; i >= 0; i-- { + if uuid := uuid.Parse(components[i]); uuid != nil { + return uuid.String(), i == len(components)-1 + } + } + return "", false +} + +// readStartedAtFile reads the date from an upload's startedAtFile +func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { + startedAtBytes, err := driver.GetContent(path) + if err != nil { + return time.Now(), err + } + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return time.Now(), err + } + return startedAt, nil +} diff --git a/docs/storage/purgeuploads_test.go b/docs/storage/purgeuploads_test.go new file mode 100644 index 00000000..368e7c86 --- /dev/null +++ b/docs/storage/purgeuploads_test.go @@ -0,0 +1,165 @@ +package storage + +import ( + "path" + "strings" + "testing" + "time" + + "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +var pm = defaultPathMapper + +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) driver.StorageDriver { + d := inmemory.New() + for i := 0; i < numUploads; i++ { + addUploads(t, d, uuid.New(), repoName, startedAt) + } + return d +} + +func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { + dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: 
uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + if err := d.PutContent(dataPath, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, uuid: uploadID}) + if err != nil { + t.Fatalf("Unable to resolve path") + } + + if d.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + t.Fatalf("Unable to write startedAt file") + } + +} + +func TestPurgeGather(t *testing.T) { + uploadCount := 5 + fs := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(fs) + if len(errs) != 0 { + t.Errorf("Unexepected errors: %q", errs) + } + if len(uploadData) != uploadCount { + t.Errorf("Unexpected upload file count: %d != %d", uploadCount, len(uploadData)) + } +} + +func TestPurgeNone(t *testing.T) { + fs := testUploadFS(t, 10, "test-repo", time.Now()) + oneHourAgo := time.Now().Add(-1 * time.Hour) + deleted, errs := PurgeUploads(fs, oneHourAgo, true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + if len(deleted) != 0 { + t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) + } +} + +func TestPurgeAll(t *testing.T) { + uploadCount := 10 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + + // Ensure > 1 repos are purged + addUploads(t, fs, uuid.New(), "test-repo2", oneHourAgo) + uploadCount++ + + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + fileCount := uploadCount + if len(deleted) != fileCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), fileCount) + } +} + +func TestPurgeSome(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + + newUploadCount := 4 + + for i := 0; i < newUploadCount; i++ { + addUploads(t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) + } + + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors:", errs) + } + if len(deleted) != oldUploadCount { + t.Errorf("Unexpectedly deleted file count %d != %d", + len(deleted), oldUploadCount) + } +} + +func TestPurgeOnlyUploads(t *testing.T) { + oldUploadCount := 5 + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + + // Create a directory tree outside _uploads and ensure + // these files aren't deleted. 
+ dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", uuid: uuid.New()}) + if err != nil { + t.Fatalf(err.Error()) + } + nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) + if strings.Index(nonUploadPath, "_upload") != -1 { + t.Fatalf("Non-upload path not created correctly") + } + + nonUploadFile := path.Join(nonUploadPath, "file") + if err = fs.PutContent(nonUploadFile, []byte("")); err != nil { + t.Fatalf("Unable to write data file") + } + + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) != 0 { + t.Error("Unexpected errors", errs) + } + for _, file := range deleted { + if strings.Index(file, "_upload") == -1 { + t.Errorf("Non-upload file deleted") + } + } +} + +func TestPurgeMissingStartedAt(t *testing.T) { + oneHourAgo := time.Now().Add(-1 * time.Hour) + fs := testUploadFS(t, 1, "test-repo", oneHourAgo) + err := Walk(fs, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + _, file := path.Split(filePath) + + if file == "startedat" { + if err := fs.Delete(filePath); err != nil { + t.Fatalf("Unable to delete startedat file: %s", filePath) + } + } + return nil + }) + if err != nil { + t.Fatalf("Unexpected error during Walk: %s ", err.Error()) + } + deleted, errs := PurgeUploads(fs, time.Now(), true) + if len(errs) > 0 { + t.Errorf("Unexpected errors") + } + if len(deleted) > 0 { + t.Errorf("Files unexpectedly deleted: %s", deleted) + } +} diff --git a/docs/storage/walk.go b/docs/storage/walk.go new file mode 100644 index 00000000..7b958d87 --- /dev/null +++ b/docs/storage/walk.go @@ -0,0 +1,50 @@ +package storage + +import ( + "errors" + "fmt" + + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +// SkipDir is used as a return value from onFileFunc to indicate that +// the directory named in the call is to be skipped. It is not returned +// as an error by any function. +var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk +// If the returned error is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. 
Otherwise Walk will return +type WalkFn func(fileInfo storageDriver.FileInfo) error + +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(from) + if err != nil { + return err + } + for _, child := range children { + fileInfo, err := driver.Stat(child) + if err != nil { + return err + } + err = f(fileInfo) + skipDir := (err == ErrSkipDir) + if err != nil && !skipDir { + return err + } + + if fileInfo.IsDir() && !skipDir { + Walk(driver, child, f) + } + } + return nil +} + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/docs/storage/walk_test.go b/docs/storage/walk_test.go new file mode 100644 index 00000000..22b91b35 --- /dev/null +++ b/docs/storage/walk_test.go @@ -0,0 +1,119 @@ +package storage + +import ( + "fmt" + "testing" + + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func testFS(t *testing.T) (driver.StorageDriver, map[string]string) { + d := inmemory.New() + c := []byte("") + if err := d.PutContent("/a/b/c/d", c); err != nil { + t.Fatalf("Unable to put to inmemory fs") + } + if err := d.PutContent("/a/b/c/e", c); err != nil { + t.Fatalf("Unable to put to inmemory fs") + } + + expected := map[string]string{ + "/a": "dir", + "/a/b": "dir", + "/a/b/c": "dir", + "/a/b/c/d": "file", + "/a/b/c/e": "file", + } + + return d, expected +} + +func TestWalkErrors(t *testing.T) { + d, expected := testFS(t) + fileCount := len(expected) + err := Walk(d, "", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Error("Expected invalid root err") + } + + err = Walk(d, "/", func(fileInfo driver.FileInfo) error { + // error on the 2nd file + if fileInfo.Path() == "/a/b" { + return fmt.Errorf("Early termination") + } + delete(expected, fileInfo.Path()) + return nil + }) + if len(expected) != fileCount-1 { + t.Error("Walk failed to terminate with error") + } + if err != nil { + t.Error(err.Error()) + } + + err = Walk(d, "/nonexistant", func(fileInfo driver.FileInfo) error { + return nil + }) + if err == nil { + t.Errorf("Expected missing file err") + } + +} + +func TestWalk(t *testing.T) { + d, expected := testFS(t) + err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + filetype, ok := expected[filePath] + if !ok { + t.Fatalf("Unexpected file in walk: %q", filePath) + } + + if fileInfo.IsDir() { + if filetype != "dir" { + t.Errorf("Unexpected file type: %q", filePath) + } + } else { + if filetype != "file" { + t.Errorf("Unexpected file type: %q", filePath) + } + } + delete(expected, filePath) + return nil + }) + if len(expected) > 0 { + t.Errorf("Missed files in walk: %q", expected) + } + if err != nil { + t.Fatalf(err.Error()) + } +} + +func TestWalkSkipDir(t *testing.T) { + d, expected := testFS(t) + err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + filePath := fileInfo.Path() + if filePath == "/a/b" { + // skip processing /a/b/c and /a/b/c/d + return ErrSkipDir + } + delete(expected, filePath) + return nil + }) + if err != nil { + t.Fatalf(err.Error()) + } + if _, ok := expected["/a/b/c"]; !ok { + t.Errorf("/a/b/c not skipped") + } + if _, ok := expected["/a/b/c/d"]; !ok { + 
t.Errorf("/a/b/c/d not skipped") + } + if _, ok := expected["/a/b/c/e"]; !ok { + t.Errorf("/a/b/c/e not skipped") + } + +} From f3f46307f2a4009bc7005a73fcd764b783bc8336 Mon Sep 17 00:00:00 2001 From: bin liu Date: Fri, 17 Apr 2015 12:39:52 +0000 Subject: [PATCH 081/501] fix some typos in source comments Signed-off-by: bin liu --- docs/auth/auth.go | 4 ++-- docs/auth/token/util.go | 2 +- docs/storage/driver/ipc/server.go | 6 +++--- docs/storage/driver/testsuites/testsuites.go | 2 +- docs/storage/layer_test.go | 2 +- docs/storage/layerwriter.go | 2 +- docs/storage/paths.go | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/auth/auth.go b/docs/auth/auth.go index a8499342..ec82b469 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -3,7 +3,7 @@ // An access controller has a simple interface with a single `Authorized` // method which checks that a given request is authorized to perform one or // more actions on one or more resources. This method should return a non-nil -// error if the requset is not authorized. +// error if the request is not authorized. // // An implementation registers its access controller by name with a constructor // which accepts an options map for configuring the access controller. @@ -50,7 +50,7 @@ type Resource struct { } // Access describes a specific action that is -// requested or allowed for a given recource. +// requested or allowed for a given resource. type Access struct { Resource Action string diff --git a/docs/auth/token/util.go b/docs/auth/token/util.go index bf3e01e8..d7f95be4 100644 --- a/docs/auth/token/util.go +++ b/docs/auth/token/util.go @@ -7,7 +7,7 @@ import ( ) // joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters ommitted in accordance +// encoding format but with all trailing '=' characters omitted in accordance // with the jose specification. 
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 func joseBase64UrlEncode(b []byte) string { diff --git a/docs/storage/driver/ipc/server.go b/docs/storage/driver/ipc/server.go index 4c6f1d4d..1752f12b 100644 --- a/docs/storage/driver/ipc/server.go +++ b/docs/storage/driver/ipc/server.go @@ -101,7 +101,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { } case "ReadStream": path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be convereted to any int/uint type + // Depending on serialization method, Offset may be converted to any int/uint type offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() reader, err := driver.ReadStream(path, offset) var response ReadStreamResponse @@ -116,9 +116,9 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { } case "WriteStream": path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be convereted to any int/uint type + // Depending on serialization method, Offset may be converted to any int/uint type offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - // Depending on serialization method, Size may be convereted to any int/uint type + // Depending on serialization method, Size may be converted to any int/uint type size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int() reader, _ := request.Parameters["Reader"].(io.ReadCloser) err := driver.WriteStream(path, offset, size, reader) diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 74ddab6f..9f387a62 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -435,7 +435,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, fullContents) - // Writing past size of file extends file (no offest error). We would like + // Writing past size of file extends file (no offset error). We would like // to write chunk 4 one chunk length past chunk 3. It should be successful // and the resulting file will be 5 chunks long, with a chunk of all // zeros. diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go index e225d068..f25018da 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/layer_test.go @@ -336,7 +336,7 @@ func seekerSize(seeker io.ReadSeeker) (int64, error) { // createTestLayer creates a simple test layer in the provided driver under // tarsum dgst, returning the sha256 digest location. This is implemented -// peicemeal and should probably be replaced by the uploader when it's ready. +// piecemeal and should probably be replaced by the uploader when it's ready. func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) { h := sha256.New() rd := io.TeeReader(content, h) diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index 1e5ea918..0305d011 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -182,7 +182,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error { } if offset == int64(lw.resumableDigester.Len()) { - // State of digester is already at the requseted offset. + // State of digester is already at the requested offset. 
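(resumeHashAt, mentioned in this hunk, catches the digester up to a given upload offset, restoring a stored hash state when one matches and otherwise re-reading bytes from storage. A toy version of that catch-up bookkeeping, tracking how many bytes the hasher has consumed; names are illustrative, and the real code additionally persists intermediate hash states under the hashstates/ paths:)

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// resumableDigester pairs a hash with the count of bytes it has absorbed,
// the two facts resumeHashAt reconciles against the current write offset.
type resumableDigester struct {
	h   hash.Hash
	len int64
}

func (d *resumableDigester) Write(p []byte) (int, error) {
	n, err := d.h.Write(p)
	d.len += int64(n)
	return n, err
}

// resumeAt re-feeds bytes from stored content until the digester reaches
// offset. Production code restores a persisted hash state instead whenever
// possible, since re-reading defeats the point for large layers.
func (d *resumableDigester) resumeAt(stored io.ReaderAt, offset int64) error {
	if offset < d.len {
		return fmt.Errorf("cannot rewind digester from %d to %d", d.len, offset)
	}
	_, err := io.Copy(d, io.NewSectionReader(stored, d.len, offset-d.len))
	return err
}

func main() {
	content := strings.NewReader("uploaded layer bytes")
	d := &resumableDigester{h: sha256.New()}

	if err := d.resumeAt(content, 8); err != nil { // catch up to offset 8
		panic(err)
	}
	d.Write([]byte(" layer bytes")) // resume the upload from offset 8

	// Equals sha256 of the full "uploaded layer bytes" content.
	fmt.Println("sha256:" + hex.EncodeToString(d.h.Sum(nil)))
}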
return nil } diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 7aeff6e4..fe648f51 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -387,7 +387,7 @@ type layerLinkPathSpec struct { func (layerLinkPathSpec) pathSpec() {} // blobAlgorithmReplacer does some very simple path sanitization for user -// input. Mostly, this is to provide some heirachry for tarsum digests. Paths +// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths // should be "safe" before getting this far due to strict digest requirements // but we can add further path conversion here, if needed. var blobAlgorithmReplacer = strings.NewReplacer( From bc2b6efaa693eb1746a38492f3c58b5a732df3ad Mon Sep 17 00:00:00 2001 From: Richard Date: Thu, 16 Apr 2015 11:37:31 -0700 Subject: [PATCH 082/501] Add path and other info to filesytem trace methods. Also fix Delete (was 'Move'). --- docs/storage/driver/base/base.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index ba7a859d..6b7bcf0f 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -34,7 +34,7 @@ // } // // The type now implements StorageDriver, proxying through Base, without -// exporting an unnessecary field. +// exporting an unnecessary field. package base import ( @@ -53,7 +53,7 @@ type Base struct { // GetContent wraps GetContent of underlying storage driver. func (base *Base) GetContent(path string) ([]byte, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.GetContent") + defer done("Base.GetContent(\"%s\")", path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} @@ -65,7 +65,7 @@ func (base *Base) GetContent(path string) ([]byte, error) { // PutContent wraps PutContent of underlying storage driver. func (base *Base) PutContent(path string, content []byte) error { _, done := context.WithTrace(context.Background()) - defer done("Base.PutContent") + defer done("Base.PutContent(\"%s\")", path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} @@ -77,7 +77,7 @@ func (base *Base) PutContent(path string, content []byte) error { // ReadStream wraps ReadStream of underlying storage driver. func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.ReadStream") + defer done("Base.ReadStream(\"%s\", %d)", path, offset) if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} @@ -93,7 +93,7 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream wraps WriteStream of underlying storage driver. func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { _, done := context.WithTrace(context.Background()) - defer done("Base.WriteStream") + defer done("Base.WriteStream(\"%s\", %d)", path, offset) if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} @@ -109,7 +109,7 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i // Stat wraps Stat of underlying storage driver. 
func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.Stat") + defer done("Base.Stat(\"%s\")", path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} @@ -121,7 +121,7 @@ func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { // List wraps List of underlying storage driver. func (base *Base) List(path string) ([]string, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.List") + defer done("Base.List(\"%s\")", path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path} @@ -133,7 +133,7 @@ func (base *Base) List(path string) ([]string, error) { // Move wraps Move of underlying storage driver. func (base *Base) Move(sourcePath string, destPath string) error { _, done := context.WithTrace(context.Background()) - defer done("Base.Move") + defer done("Base.Move(\"%s\", \"%s\"", sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { return storagedriver.InvalidPathError{Path: sourcePath} @@ -147,7 +147,7 @@ func (base *Base) Move(sourcePath string, destPath string) error { // Delete wraps Delete of underlying storage driver. func (base *Base) Delete(path string) error { _, done := context.WithTrace(context.Background()) - defer done("Base.Move") + defer done("Base.Delete(\"%s\")", path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} @@ -159,7 +159,7 @@ func (base *Base) Delete(path string) error { // URLFor wraps URLFor of underlying storage driver. func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.URLFor") + defer done("Base.URLFor(\"%s\")", path) if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path} From 431811056bb995a4e48471c74deea5d975283ce9 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 20 Apr 2015 16:35:09 -0700 Subject: [PATCH 083/501] Add logging for generic handler errors. Signed-off-by: Richard Scothern --- docs/handlers/app.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 28940c8e..e35d8633 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -365,11 +365,25 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // future refactoring. w.WriteHeader(http.StatusBadRequest) } + app.logError(context, context.Errors) serveJSON(w, context.Errors) } }) } +func (app *App) logError(context context.Context, errors v2.Errors) { + for _, e := range errors.Errors { + c := ctxu.WithValue(context, "err.code", e.Code) + c = ctxu.WithValue(c, "err.message", e.Message) + c = ctxu.WithValue(c, "err.detail", e.Detail) + c = ctxu.WithLogger(c, ctxu.GetLogger(c, + "err.code", + "err.message", + "err.detail")) + ctxu.GetLogger(c).Errorf("An error occured") + } +} + // context constructs the context object for the application. This only be // called once per request. 
func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { From d4ce8f5ef8adc994b34bf02b45d3081cb697d8eb Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 20 Apr 2015 18:43:19 -0700 Subject: [PATCH 084/501] Attempt to deal with eventual consistency by retrying Rather than accept the result of a layer validation, we retry up to three times, backing off 100ms after each try. The thought is that we allow s3 files to make their way into the correct location, increasing the likelihood the verification can proceed, if possible. Signed-off-by: Stephen J Day --- docs/storage/layerwriter.go | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index 0305d011..fe118597 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -46,16 +46,37 @@ func (lw *layerWriter) StartedAt() time.Time { // uploaded layer. The final size and checksum are validated against the // contents of the uploaded layer. The checksum should be provided in the // format :. -func (lw *layerWriter) Finish(digest digest.Digest) (distribution.Layer, error) { +func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) { ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish") if err := lw.bufferedFileWriter.Close(); err != nil { return nil, err } - canonical, err := lw.validateLayer(digest) - if err != nil { + var ( + canonical digest.Digest + err error + ) + + // HACK(stevvooe): To deal with s3's lack of consistency, attempt to retry + // validation on failure. Three attempts are made, backing off 100ms each + // time. + for retries := 0; ; retries++ { + canonical, err = lw.validateLayer(dgst) + if err == nil { + break + } + + ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries). + Errorf("error validating layer: %v", err) + + if retries < 3 { + time.Sleep(100 * time.Millisecond) + continue + } + return nil, err + } if err := lw.moveLayer(canonical); err != nil { @@ -64,7 +85,7 @@ func (lw *layerWriter) Finish(digest digest.Digest) (distribution.Layer, error) } // Link the layer blob into the repository. - if err := lw.linkLayer(canonical, digest); err != nil { + if err := lw.linkLayer(canonical, dgst); err != nil { return nil, err } From 77b30cfb2573520edd1cbdf41f03d779e97fa63b Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 21 Apr 2015 11:34:18 -0700 Subject: [PATCH 085/501] log canonical digest on verification error Signed-off-by: Stephen J Day --- docs/storage/layerwriter.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index fe118597..0a42aa40 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -345,6 +345,8 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) } if !verified { + ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst).
+ Errorf("canonical digest does match provided digest") return "", distribution.ErrLayerInvalidDigest{ Digest: dgst, Reason: fmt.Errorf("content does not match digest"), From 36ffe0c134aba840c81e961fb33260f6fb360d7b Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 21 Apr 2015 12:10:48 -0700 Subject: [PATCH 086/501] Backoff retry on verification to give s3 time to propagate Signed-off-by: Stephen J Day --- docs/storage/layerwriter.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index fe118597..93c93b8a 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -59,8 +59,8 @@ func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) { ) // HACK(stevvooe): To deal with s3's lack of consistency, attempt to retry - // validation on failure. Three attempts are made, backing off 100ms each - // time. + // validation on failure. Three attempts are made, backing off + // retries*100ms each time. for retries := 0; ; retries++ { canonical, err = lw.validateLayer(dgst) if err == nil { @@ -71,7 +71,7 @@ func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) { Errorf("error validating layer: %v", err) if retries < 3 { - time.Sleep(100 * time.Millisecond) + time.Sleep(100 * time.Millisecond * time.Duration(retries+1)) continue } From f1ea982e82289edc14eb1c63c432851d5b1a59eb Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 22 Apr 2015 12:12:59 -0700 Subject: [PATCH 087/501] Check error returned from io.Copy Signed-off-by: Stephen J Day --- docs/handlers/layerupload.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index b728d0e1..8c96b7a6 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -198,7 +198,11 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * // may miss a root cause. // Read in the final chunk, if any. - io.Copy(luh.Upload, r.Body) + if _, err := io.Copy(luh.Upload, r.Body); err != nil { + ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + } layer, err := luh.Upload.Finish(dgst) if err != nil { From e4794ff73dc42e51021f4013c581f1d108a025c6 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 22 Apr 2015 14:31:34 -0700 Subject: [PATCH 088/501] Allow configuration of chunksize parameter The code using values from the yaml package wasn't careful enought with the possible incoming types. Turns out, it is just an int but we've made this section somewhat bulletproof in case that package changes the behavior. This code likely never worked. The configuration system should be decoupled from the object instantiation. 
Signed-off-by: Stephen J Day --- docs/storage/driver/s3/s3.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 402f2eaa..cf58df04 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -20,6 +20,7 @@ import ( "io" "io/ioutil" "net/http" + "reflect" "strconv" "strings" "time" @@ -148,9 +149,23 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { chunkSize := int64(defaultChunkSize) chunkSizeParam, ok := parameters["chunksize"] if ok { - chunkSize, ok = chunkSizeParam.(int64) - if !ok || chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize parameter should be a number that is larger than 5*1024*1024") + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize <= minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) } } From c49f7cd0154b3bfc18d53e48a9fb46586092f71f Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 22 Apr 2015 15:07:18 -0700 Subject: [PATCH 089/501] Pool buffers used in S3.WriteStream Signed-off-by: Stephen J Day --- docs/storage/driver/s3/s3.go | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 402f2eaa..1b04d784 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -22,6 +22,7 @@ import ( "net/http" "strconv" "strings" + "sync" "time" "github.com/AdRoll/goamz/aws" @@ -72,6 +73,9 @@ type driver struct { ChunkSize int64 Encrypt bool RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream } type baseEmbed struct { @@ -224,6 +228,11 @@ func New(params DriverParameters) (*Driver, error) { ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) } return &Driver{ @@ -287,8 +296,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total return 0, err } - buf := make([]byte, d.ChunkSize) - zeroBuf := make([]byte, d.ChunkSize) + buf := d.getbuf() // We never want to leave a dangling multipart upload, our only consistent state is // when there is a whole object at path. This is in order to remain consistent with @@ -314,6 +322,8 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total } } } + + d.putbuf(buf) // needs to be here to pick up new buf value }() // Fills from 0 to total from current @@ -367,6 +377,8 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total } go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + // parts and partNumber are safe, because this function is the only one modifying them and we // force it to be executed serially. 
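(The buffer reuse added in this patch is a standard sync.Pool shape: one chunk-sized buffer per in-flight part, zeroed on return so stale upload bytes cannot leak into the zero-fill logic. A condensed sketch of the getbuf/putbuf pair; the chunk size here is made up for the example:)

package main

import (
	"fmt"
	"sync"
)

const chunkSize = 5 << 20 // illustrative; the driver uses its configured ChunkSize

type driver struct {
	pool  sync.Pool
	zeros []byte // shared all-zero buffer, never written to
}

func newDriver() *driver {
	d := &driver{zeros: make([]byte, chunkSize)}
	d.pool.New = func() interface{} {
		return make([]byte, chunkSize) // allocated only when the pool is empty
	}
	return d
}

func (d *driver) getbuf() []byte { return d.pool.Get().([]byte) }

// putbuf zeroes the buffer before returning it to the pool.
func (d *driver) putbuf(p []byte) {
	copy(p, d.zeros)
	d.pool.Put(p)
}

func main() {
	d := newDriver()
	buf := d.getbuf()
	copy(buf, "part data")
	d.putbuf(buf)
	fmt.Println(len(d.getbuf())) // a zeroed chunk-sized buffer, likely reused
}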
if bytesRead > 0 { @@ -381,7 +393,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total putErrChan <- nil }(bytesRead, from, buf) - buf = make([]byte, d.ChunkSize) + buf = d.getbuf() // use a new buffer for the next call return nil } @@ -429,7 +441,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total fromZeroFillSmall := func(from, to int64) error { bytesRead = 0 for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(zeroBuf).Read(buf[from+int64(bytesRead) : to]) + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) bytesRead += nn if err != nil { return err @@ -443,7 +455,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total fromZeroFillLarge := func(from, to int64) error { bytesRead64 := int64(0) for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(zeroBuf)) + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) if err != nil { return err } @@ -724,3 +736,13 @@ func getPermissions() s3.ACL { func (d *driver) getContentType() string { return "application/octet-stream" } + +// getbuf returns a buffer from the driver's pool with length d.ChunkSize. +func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} From b361b4811ba1b0df7c41bd14ff01afb303481044 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 22 Apr 2015 17:30:01 -0700 Subject: [PATCH 090/501] Require storage drivers to report their name Signed-off-by: Stephen J Day --- docs/storage/driver/azure/azure.go | 3 +++ docs/storage/driver/filesystem/driver.go | 4 ++++ docs/storage/driver/inmemory/driver.go | 4 ++++ docs/storage/driver/s3/s3.go | 4 ++++ docs/storage/driver/storagedriver.go | 5 +++++ 5 files changed, 20 insertions(+) diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index 1473f523..b985b7a9 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -94,6 +94,9 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { } // Implement the storagedriver.StorageDriver interface. +func (d *driver) Name() string { + return driverName +} // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(path string) ([]byte, error) { diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index 0e5aea75..9ffe0888 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -71,6 +71,10 @@ func New(rootDirectory string) *Driver { // Implement the storagedriver.StorageDriver interface +func (d *driver) Name() string { + return driverName +} + // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(path string) ([]byte, error) { rc, err := d.ReadStream(path, 0) diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go index f2c9c3ff..e0694de2 100644 --- a/docs/storage/driver/inmemory/driver.go +++ b/docs/storage/driver/inmemory/driver.go @@ -64,6 +64,10 @@ func New() *Driver { // Implement the storagedriver.StorageDriver interface. +func (d *driver) Name() string { + return driverName +} + // GetContent retrieves the content stored at "path" as a []byte. 
func (d *driver) GetContent(path string) ([]byte, error) { d.mutex.RLock() diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 92267fc4..4fd14b44 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -261,6 +261,10 @@ func New(params DriverParameters) (*Driver, error) { // Implement the storagedriver.StorageDriver interface +func (d *driver) Name() string { + return driverName +} + // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(path string) ([]byte, error) { content, err := d.Bucket.Get(d.s3Path(path)) diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index 442dc257..cda1c37d 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -35,6 +35,11 @@ const CurrentVersion Version = "0.1" // StorageDriver defines methods that a Storage Driver must implement for a // filesystem-like key/value object storage. type StorageDriver interface { + // Name returns the human-readable "name" of the driver, useful in error + // messages and logging. By convention, this will just be the registration + // name, but drivers may provide other information here. + Name() string + // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. GetContent(path string) ([]byte, error) From ecda1f4eff147603738593f156c0b7a78278311c Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 22 Apr 2015 17:30:31 -0700 Subject: [PATCH 091/501] Include driver name in trace messages Signed-off-by: Stephen J Day --- docs/storage/driver/base/base.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index 6b7bcf0f..8fa747dd 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -53,7 +53,7 @@ type Base struct { // GetContent wraps GetContent of underlying storage driver. func (base *Base) GetContent(path string) ([]byte, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.GetContent(\"%s\")", path) + defer done("%s.GetContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} @@ -65,7 +65,7 @@ func (base *Base) GetContent(path string) ([]byte, error) { // PutContent wraps PutContent of underlying storage driver. func (base *Base) PutContent(path string, content []byte) error { _, done := context.WithTrace(context.Background()) - defer done("Base.PutContent(\"%s\")", path) + defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} @@ -77,7 +77,7 @@ func (base *Base) PutContent(path string, content []byte) error { // ReadStream wraps ReadStream of underlying storage driver. func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.ReadStream(\"%s\", %d)", path, offset) + defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} @@ -93,7 +93,7 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream wraps WriteStream of underlying storage driver.
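// NOTE: %q formats its operand as a Go-quoted string, so a trace such as
//
//	done("%s.GetContent(%q)", base.Name(), path)
//
// renders as, e.g., s3.GetContent("/a/b"); this is the same output the old
// hand-escaped \"%s\" form produced, with escaping now left to package fmt.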
func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { _, done := context.WithTrace(context.Background()) - defer done("Base.WriteStream(\"%s\", %d)", path, offset) + defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} @@ -109,7 +109,7 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i // Stat wraps Stat of underlying storage driver. func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.Stat(\"%s\")", path) + defer done("%s.Stat(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} @@ -121,7 +121,7 @@ func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { // List wraps List of underlying storage driver. func (base *Base) List(path string) ([]string, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.List(\"%s\")", path) + defer done("%s.List(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path} @@ -133,7 +133,7 @@ func (base *Base) List(path string) ([]string, error) { // Move wraps Move of underlying storage driver. func (base *Base) Move(sourcePath string, destPath string) error { _, done := context.WithTrace(context.Background()) - defer done("Base.Move(\"%s\", \"%s\"", sourcePath, destPath) + defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { return storagedriver.InvalidPathError{Path: sourcePath} @@ -147,7 +147,7 @@ func (base *Base) Move(sourcePath string, destPath string) error { // Delete wraps Delete of underlying storage driver. func (base *Base) Delete(path string) error { _, done := context.WithTrace(context.Background()) - defer done("Base.Delete(\"%s\")", path) + defer done("%s.Delete(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} @@ -159,7 +159,7 @@ func (base *Base) Delete(path string) error { // URLFor wraps URLFor of underlying storage driver. func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { _, done := context.WithTrace(context.Background()) - defer done("Base.URLFor(\"%s\")", path) + defer done("%s.URLFor(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path} From 8d4b636a60ffd7838e798f940a355c019f747101 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Apr 2015 13:13:13 -0700 Subject: [PATCH 092/501] Return after error in handler This adds a missing return statement. It is not strictly needed since if the io.Copy fails, the Finish operation will fail. Currently, the client reports both errors, whereas with this new code only the io.Copy error is reported.
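The shape of the fix, as a stand-alone sketch (the handler and parameter names are illustrative, not the registry's actual types):

    package handlers

    import (
        "io"
        "net/http"
    )

    // completeUpload copies the final chunk and bails out on failure. The
    // early return matters: once an error response has been written, falling
    // through would stack a second error from the finish step onto the same
    // response.
    func completeUpload(w http.ResponseWriter, r *http.Request, upload io.Writer) {
        if _, err := io.Copy(upload, r.Body); err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            return
        }
        // ... finish and validate the upload here ...
    }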
Signed-off-by: Stephen J Day --- docs/handlers/layerupload.go | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 8c96b7a6..5cfa4554 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -202,6 +202,7 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err) w.WriteHeader(http.StatusInternalServerError) luh.Errors.Push(v2.ErrorCodeUnknown, err) + return } layer, err := luh.Upload.Finish(dgst) From 0d8cb4dca8d8a554720c1f37e08a11b39d70df61 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Apr 2015 16:31:41 -0700 Subject: [PATCH 093/501] Correctly check s3 chunksize parameter Signed-off-by: Stephen J Day --- docs/storage/driver/s3/s3.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 92267fc4..f9b1ea2f 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -168,7 +168,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) } - if chunkSize <= minChunkSize { + if chunkSize < minChunkSize { return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) } } From 0f897aea8fc9c3b5c0734e26f70c77049560e9f9 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Apr 2015 20:07:32 -0700 Subject: [PATCH 094/501] Attempt to address intermittent s3 RequestTimeout error Signed-off-by: Stephen J Day --- docs/storage/driver/s3/s3.go | 65 +++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 9 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 92267fc4..ee680fdc 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -28,6 +28,7 @@ import ( "github.com/AdRoll/goamz/aws" "github.com/AdRoll/goamz/s3" + "github.com/Sirupsen/logrus" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -394,18 +395,64 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total go func(bytesRead int, from int64, buf []byte) { defer d.putbuf(buf) // this buffer gets dropped after this call - // parts and partNumber are safe, because this function is the only one modifying them and we - // force it to be executed serially. - if bytesRead > 0 { - part, putErr := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if putErr != nil { - putErrChan <- putErr + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the s3 + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying s3 library should handle it, it doesn't seem to + // be part of the shouldRetry function (see AdRoll/goamz/s3). + defer func() { + putErrChan <- nil // for some reason, we do this no matter what. 
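// NOTE: the unconditional send looks odd but appears deliberate: the
// enclosing WriteStream expects a value on putErrChan for each goroutine it
// launches, and when bytesRead <= 0 this goroutine returns without putting
// a part, so the deferred nil is what keeps that receive from blocking.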
+ }() + + if bytesRead <= 0 { + return + } + + var err error + var part s3.Part + + loop: + for retries := 0; retries < 5; retries++ { + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) + if err == nil { + break // success! } - parts = append(parts, part) - partNumber++ + // NOTE(stevvooe): This retry code tries to only retry under + // conditions where the s3 package does not. We may add s3 + // error codes to the below if we see others bubble up in the + // application. Right now, the most troubling is + // RequestTimeout, which seems to only triggered when a tcp + // connection to s3 slows to a crawl. If the RequestTimeout + // ends up getting added to the s3 library and we don't see + // other errors, this retry loop can be removed. + switch err := err.(type) { + case *s3.Error: + switch err.Code { + case "RequestTimeout": + // allow retries on only this error. + default: + break loop + } + } + + backoff := 100 * time.Millisecond * time.Duration(retries+1) + logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) + time.Sleep(backoff) } - putErrChan <- nil + + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + putErrChan <- err + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. + parts = append(parts, part) + partNumber++ }(bytesRead, from, buf) buf = d.getbuf() // use a new buffer for the next call From 2c7489e6b208c816b34c205ad8a4089afbe0a244 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Fri, 24 Apr 2015 14:04:48 -0700 Subject: [PATCH 095/501] Updated urlbuilder X-Forwarded-Host logic According to the Apache mod_proxy docs, X-Forwarded-Host can be a comma-separated list of hosts, to which each proxy appends the requested host. We want to grab only the first from this comma-separated list to get the original requested Host when building URLs. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/api/v2/urls.go | 7 ++++++- docs/api/v2/urls_test.go | 14 ++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index 4b42dd16..60aad565 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -62,7 +62,12 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { host := r.Host forwardedHost := r.Header.Get("X-Forwarded-Host") if len(forwardedHost) > 0 { - host = forwardedHost + // According to the Apache mod_proxy docs, X-Forwarded-Host can be a + // comma-separated list of hosts, to which each proxy appends the + // requested host. We want to grab the first from this comma-separated + // list. 
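// For example, a request that traversed two proxies may arrive with
// X-Forwarded-Host: "first.example.com, proxy1.example.com", from which
// only "first.example.com" should be used when building URLs.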
+ hosts := strings.SplitN(forwardedHost, ",", 2) + host = strings.TrimSpace(hosts[0]) } basePath := routeDescriptorsMap[RouteNameBase].Path diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index 237d0f61..1113a7dd 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -151,6 +151,12 @@ func TestBuilderFromRequest(t *testing.T) { forwardedProtoHeader := make(http.Header, 1) forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + forwardedHostHeader1 := make(http.Header, 1) + forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com") + + forwardedHostHeader2 := make(http.Header, 1) + forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") + testRequests := []struct { request *http.Request base string @@ -163,6 +169,14 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com", }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1}, + base: "http://first.example.com", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, + base: "http://first.example.com", + }, } for _, tr := range testRequests { From 10f32bfcd53f53e17d45ac4f218d49a8d9ad6e3d Mon Sep 17 00:00:00 2001 From: xiekeyang Date: Mon, 27 Apr 2015 15:18:55 +0800 Subject: [PATCH 096/501] simplify the embedded method expression of repository Signed-off-by: xiekeyang --- docs/storage/layerstore.go | 8 ++++---- docs/storage/layerwriter.go | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go index 1c7428a9..a86b668f 100644 --- a/docs/storage/layerstore.go +++ b/docs/storage/layerstore.go @@ -65,7 +65,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) { uuid := uuid.New() startedAt := time.Now().UTC() - path, err := ls.repository.registry.pm.path(uploadDataPathSpec{ + path, err := ls.repository.pm.path(uploadDataPathSpec{ name: ls.repository.Name(), uuid: uuid, }) @@ -74,7 +74,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) { return nil, err } - startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ + startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{ name: ls.repository.Name(), uuid: uuid, }) @@ -95,7 +95,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) { // state of the upload. func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) { ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume") - startedAtPath, err := ls.repository.registry.pm.path(uploadStartedAtPathSpec{ + startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{ name: ls.repository.Name(), uuid: uuid, }) @@ -152,7 +152,7 @@ func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (di func (ls *layerStore) path(dgst digest.Digest) (string, error) { // We must traverse this path through the link to enforce ownership. 
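// NOTE: these simplifications lean on Go's embedded-field promotion: the
// repository type embeds *registry, so ls.repository.pm and
// ls.repository.registry.pm name the same field. Reduced to a sketch:
//
//	type registry struct{ pm *pathMapper }
//	type repository struct{ *registry }
//	// repo.pm == repo.registry.pm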
- layerLinkPath, err := ls.repository.registry.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) + layerLinkPath, err := ls.repository.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) if err != nil { return "", err } diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go index 3efd60a4..adf68ca9 100644 --- a/docs/storage/layerwriter.go +++ b/docs/storage/layerwriter.go @@ -158,7 +158,7 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ + uploadHashStatePathPrefix, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{ name: lw.layerStore.repository.Name(), uuid: lw.uuid, alg: lw.resumableDigester.Digest().Algorithm(), @@ -271,7 +271,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error { } func (lw *layerWriter) storeHashState() error { - uploadHashStatePath, err := lw.layerStore.repository.registry.pm.path(uploadHashStatePathSpec{ + uploadHashStatePath, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{ name: lw.layerStore.repository.Name(), uuid: lw.uuid, alg: lw.resumableDigester.Digest().Algorithm(), @@ -360,7 +360,7 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) // identified by dgst. The layer should be validated before commencing the // move. func (lw *layerWriter) moveLayer(dgst digest.Digest) error { - blobPath, err := lw.layerStore.repository.registry.pm.path(blobDataPathSpec{ + blobPath, err := lw.layerStore.repository.pm.path(blobDataPathSpec{ digest: dgst, }) @@ -426,7 +426,7 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige } seenDigests[dgst] = struct{}{} - layerLinkPath, err := lw.layerStore.repository.registry.pm.path(layerLinkPathSpec{ + layerLinkPath, err := lw.layerStore.repository.pm.path(layerLinkPathSpec{ name: lw.layerStore.repository.Name(), digest: dgst, }) @@ -435,7 +435,7 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige return err } - if err := lw.layerStore.repository.registry.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { + if err := lw.layerStore.repository.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { return err } } @@ -447,7 +447,7 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige // instance. An error will be returned if the clean up cannot proceed. If the // resources are already not present, no error will be returned. 
func (lw *layerWriter) removeResources() error { - dataPath, err := lw.layerStore.repository.registry.pm.path(uploadDataPathSpec{ + dataPath, err := lw.layerStore.repository.pm.path(uploadDataPathSpec{ name: lw.layerStore.repository.Name(), uuid: lw.uuid, }) From 5caa1fe3b0fe2fd0fdb6491ce8ac42a0c273fbd3 Mon Sep 17 00:00:00 2001 From: Richard Date: Thu, 16 Apr 2015 18:34:29 -0700 Subject: [PATCH 097/501] Add configuration for upload purging Signed-off-by: Richard Scothern --- docs/handlers/app.go | 91 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 79 insertions(+), 12 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index e35d8633..3cc360c6 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -81,7 +81,18 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App panic(err) } - startUploadPurger(app.driver, ctxu.GetLogger(app)) + purgeConfig := uploadPurgeDefaultConfig() + if mc, ok := configuration.Storage["maintenance"]; ok { + for k, v := range mc { + switch k { + case "uploadpurging": + purgeConfig = v.(map[interface{}]interface{}) + } + } + + } + + startUploadPurger(app.driver, ctxu.GetLogger(app), purgeConfig) app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) if err != nil { @@ -568,26 +579,82 @@ func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []co return driver, nil } +// uploadPurgeDefaultConfig provides a default configuration for upload +// purging to be used in the absence of configuration in the +// configuration file +func uploadPurgeDefaultConfig() map[interface{}]interface{} { + config := map[interface{}]interface{}{} + config["enabled"] = true + config["age"] = "168h" + config["interval"] = "24h" + config["dryrun"] = false + return config +} + +func badPurgeUploadConfig(reason string) { + panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) +} + // startUploadPurger schedules a goroutine which will periodically // check upload directories for old files and delete them -func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger) { - rand.Seed(time.Now().Unix()) - jitter := time.Duration(rand.Int()%60) * time.Minute +func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { + if config["enabled"] == false { + return + } - // Start with reasonable defaults - // TODO:(richardscothern) make configurable - purgeAge := time.Duration(7 * 24 * time.Hour) - timeBetweenPurges := time.Duration(1 * 24 * time.Hour) + var purgeAgeDuration time.Duration + var err error + purgeAge, ok := config["age"] + if ok { + ageStr, ok := purgeAge.(string) + if !ok { + badPurgeUploadConfig("age is not a string") + } + purgeAgeDuration, err = time.ParseDuration(ageStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) + } + } else { + badPurgeUploadConfig("age missing") + } + + var intervalDuration time.Duration + interval, ok := config["interval"] + if ok { + intervalStr, ok := interval.(string) + if !ok { + badPurgeUploadConfig("interval is not a string") + } + + intervalDuration, err = time.ParseDuration(intervalStr) + if err != nil { + badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) + } + } else { + badPurgeUploadConfig("interval missing") + } + + var dryRunBool bool + dryRun, ok := config["dryrun"] + if ok { + dryRunBool, ok = dryRun.(bool) + if !ok { + badPurgeUploadConfig("cannot
parse dryrun") + } + } else { + badPurgeUploadConfig("dryrun missing") + } go func() { + rand.Seed(time.Now().Unix()) + jitter := time.Duration(rand.Int()%60) * time.Minute log.Infof("Starting upload purge in %s", jitter) time.Sleep(jitter) for { - storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAge), true) - log.Infof("Starting upload purge in %s", timeBetweenPurges) - time.Sleep(timeBetweenPurges) + storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + log.Infof("Starting upload purge in %s", intervalDuration) + time.Sleep(intervalDuration) } }() - } From 5d9105bd25827e5397a5a1c079a45d7085b76e26 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 27 Apr 2015 15:58:58 -0700 Subject: [PATCH 098/501] Make Storage Driver API calls context aware. - Change driver interface to take a context as its first argument - Make newFileReader take a context as its first argument - Make newFileWriter take a context as its first argument - Make blobstore exists and delete take a context as a first argument - Pass the layerreader's context to the storage layer - Pass the app's context to purgeuploads - Store the app's context into the blobstore (was previously null) - Pass the trace'd context to the storage drivers Signed-off-by: Richard Scothern --- docs/handlers/app.go | 14 +- docs/handlers/app_test.go | 5 +- docs/handlers/layer.go | 6 +- docs/storage/blobstore.go | 21 +- docs/storage/driver/azure/azure.go | 19 +- docs/storage/driver/base/base.go | 54 ++-- docs/storage/driver/filesystem/driver.go | 23 +- docs/storage/driver/inmemory/driver.go | 21 +- .../middleware/cloudfront/middleware.go | 6 +- docs/storage/driver/s3/s3.go | 24 +- docs/storage/driver/s3/s3_test.go | 10 +- docs/storage/driver/storagedriver.go | 20 +- docs/storage/driver/testsuites/testsuites.go | 243 +++++++++--------- docs/storage/filereader.go | 10 +- docs/storage/filereader_test.go | 16 +- docs/storage/filewriter.go | 10 +- docs/storage/filewriter_test.go | 24 +- docs/storage/layer_test.go | 13 +- docs/storage/layerreader.go | 2 +- docs/storage/layerstore.go | 22 +- docs/storage/layerwriter.go | 41 +-- docs/storage/manifeststore_test.go | 2 +- docs/storage/purgeuploads.go | 14 +- docs/storage/purgeuploads_test.go | 49 ++-- docs/storage/registry.go | 3 +- docs/storage/revisionstore.go | 6 +- docs/storage/signaturestore.go | 2 +- docs/storage/tagstore.go | 11 +- docs/storage/walk.go | 9 +- docs/storage/walk_test.go | 26 +- 30 files changed, 383 insertions(+), 343 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 3cc360c6..40181afa 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -73,7 +73,6 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App var err error app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) - if err != nil { // TODO(stevvooe): Move the creation of a service into a protected // method, where this is created lazily. 
Its status can be queried via @@ -92,7 +91,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App } - startUploadPurger(app.driver, ctxu.GetLogger(app), purgeConfig) + startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) if err != nil { @@ -109,10 +108,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewRedisLayerInfoCache(app.redis)) + app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewRedisLayerInfoCache(app.redis)) ctxu.GetLogger(app).Infof("using redis layerinfo cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app.driver, cache.NewInMemoryLayerInfoCache()) + app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewInMemoryLayerInfoCache()) ctxu.GetLogger(app).Infof("using inmemory layerinfo cache") default: if cc["layerinfo"] != "" { @@ -123,7 +122,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.registry == nil { // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.driver, nil) + app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil) } app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) @@ -365,7 +364,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } dispatch(context, r).ServeHTTP(w, r) - // Automated error response handling here. Handlers may return their // own errors if they need different behavior (such as range errors // for layer upload). @@ -597,7 +595,7 @@ func badPurgeUploadConfig(reason string) { // startUploadPurger schedules a goroutine which will periodically // check upload directories for old files and delete them -func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { if config["enabled"] == false { return } @@ -652,7 +650,7 @@ func startUploadPurger(storageDriver storagedriver.StorageDriver, log ctxu.Logge time.Sleep(jitter) for { - storage.PurgeUploads(storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) + storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) log.Infof("Starting upload purge in %s", intervalDuration) time.Sleep(intervalDuration) } diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index d0b9174d..8ea5b1e5 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -24,12 +24,13 @@ import ( // tested individually. 
func TestAppDispatcher(t *testing.T) { driver := inmemory.New() + ctx := context.Background() app := &App{ Config: configuration.Configuration{}, - Context: context.Background(), + Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()), + registry: storage.NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()), } server := httptest.NewServer(app) router := v2.Router() diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go index b8230135..13ee8560 100644 --- a/docs/handlers/layer.go +++ b/docs/handlers/layer.go @@ -4,7 +4,7 @@ import ( "net/http" "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" @@ -48,7 +48,7 @@ type layerHandler struct { // GetLayer fetches the binary data from backend storage returns it in the // response. func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(lh).Debug("GetImageLayer") + context.GetLogger(lh).Debug("GetImageLayer") layers := lh.Repository.Layers() layer, err := layers.Fetch(lh.Digest) @@ -65,7 +65,7 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { handler, err := layer.Handler(r) if err != nil { - ctxu.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err) + context.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err) lh.Errors.Push(v2.ErrorCodeUnknown, err) return } diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index 8bab2f5e..c0c86929 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -3,10 +3,9 @@ package storage import ( "fmt" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" ) // TODO(stevvooe): Currently, the blobStore implementation used by the @@ -32,7 +31,7 @@ func (bs *blobStore) exists(dgst digest.Digest) (bool, error) { return false, err } - ok, err := exists(bs.driver, path) + ok, err := exists(bs.ctx, bs.driver, path) if err != nil { return false, err } @@ -48,7 +47,7 @@ func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) { return nil, err } - return bs.driver.GetContent(bp) + return bs.driver.GetContent(bs.ctx, bp) } // link links the path to the provided digest by writing the digest into the @@ -62,7 +61,7 @@ func (bs *blobStore) link(path string, dgst digest.Digest) error { // The contents of the "link" file are the exact string contents of the // digest, which is specified in that package. - return bs.driver.PutContent(path, []byte(dgst)) + return bs.driver.PutContent(bs.ctx, path, []byte(dgst)) } // linked reads the link at path and returns the content. @@ -77,7 +76,7 @@ func (bs *blobStore) linked(path string) ([]byte, error) { // readlink returns the linked digest at path. 
func (bs *blobStore) readlink(path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(path) + content, err := bs.driver.GetContent(bs.ctx, path) if err != nil { return "", err } @@ -112,7 +111,7 @@ func (bs *blobStore) resolve(path string) (string, error) { func (bs *blobStore) put(p []byte) (digest.Digest, error) { dgst, err := digest.FromBytes(p) if err != nil { - ctxu.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p)) + context.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p)) return "", err } @@ -128,7 +127,7 @@ func (bs *blobStore) put(p []byte) (digest.Digest, error) { return dgst, nil } - return dgst, bs.driver.PutContent(bp, p) + return dgst, bs.driver.PutContent(bs.ctx, bp, p) } // path returns the canonical path for the blob identified by digest. The blob @@ -145,9 +144,9 @@ func (bs *blobStore) path(dgst digest.Digest) (string, error) { return bp, nil } -// exists provides a utility method to test whether or not -func exists(driver storagedriver.StorageDriver, path string) (bool, error) { - if _, err := driver.Stat(path); err != nil { +// exists provides a utility method to test whether or not a path exists +func exists(ctx context.Context, driver storagedriver.StorageDriver, path string) (bool, error) { + if _, err := driver.Stat(ctx, path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: return false, nil diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index b985b7a9..d21a8259 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -99,7 +100,7 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { blob, err := d.client.GetBlob(d.container, path) if err != nil { if is404(err) { @@ -112,13 +113,13 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(path string, contents []byte) error { +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if !ok { @@ -145,7 +146,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. 
-func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64, error) { +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { if blobExists, err := d.client.BlobExists(d.container, path); err != nil { return 0, err } else if !blobExists { @@ -166,7 +167,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (int64 // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { // Check if the path is a blob if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err @@ -215,7 +216,7 @@ func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. -func (d *driver) List(path string) ([]string, error) { +func (d *driver) List(ctx context.Context, path string) ([]string, error) { if path == "/" { path = "" } @@ -231,7 +232,7 @@ func (d *driver) List(path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. -func (d *driver) Move(sourcePath string, destPath string) error { +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath) err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) if err != nil { @@ -245,7 +246,7 @@ func (d *driver) Move(sourcePath string, destPath string) error { } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(path string) error { +func (d *driver) Delete(ctx context.Context, path string) error { ok, err := d.client.DeleteBlobIfExists(d.container, path) if err != nil { return err @@ -275,7 +276,7 @@ func (d *driver) Delete(path string) error { // URLFor returns a publicly accessible URL for the blob stored at given path // for specified duration by making use of Azure Storage Shared Access Signatures (SAS). // See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration expires, ok := options["expiry"] if ok { diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index 8fa747dd..ae28b187 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -51,32 +51,32 @@ type Base struct { } // GetContent wraps GetContent of underlying storage driver. -func (base *Base) GetContent(path string) ([]byte, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.GetContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.GetContent(path) + return base.StorageDriver.GetContent(ctx, path) } // PutContent wraps PutContent of underlying storage driver. 
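// NOTE: unlike GetContent above (and WriteStream, Stat, List, Move, Delete
// and URLFor below), the PutContent and ReadStream wrappers in the next two
// hunks still call context.WithTrace(context.Background()) rather than
// WithTrace(ctx), so traces through those two calls are not chained to the
// caller's context.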
-func (base *Base) PutContent(path string, content []byte) error { - _, done := context.WithTrace(context.Background()) +func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { + ctx, done := context.WithTrace(context.Background()) defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.PutContent(path, content) + return base.StorageDriver.PutContent(ctx, path, content) } // ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + ctx, done := context.WithTrace(context.Background()) defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) if offset < 0 { @@ -87,12 +87,12 @@ func (base *Base) ReadStream(path string, offset int64) (io.ReadCloser, error) { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.ReadStream(path, offset) + return base.StorageDriver.ReadStream(ctx, path, offset) } // WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + ctx, done := context.WithTrace(ctx) defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) if offset < 0 { @@ -103,36 +103,36 @@ func (base *Base) WriteStream(path string, offset int64, reader io.Reader) (nn i return 0, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.WriteStream(path, offset, reader) + return base.StorageDriver.WriteStream(ctx, path, offset, reader) } // Stat wraps Stat of underlying storage driver. -func (base *Base) Stat(path string) (storagedriver.FileInfo, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.Stat(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.Stat(path) + return base.StorageDriver.Stat(ctx, path) } // List wraps List of underlying storage driver. -func (base *Base) List(path string) ([]string, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) List(ctx context.Context, path string) ([]string, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.List(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.List(path) + return base.StorageDriver.List(ctx, path) } // Move wraps Move of underlying storage driver. 
-func (base *Base) Move(sourcePath string, destPath string) error { - _, done := context.WithTrace(context.Background()) +func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { + ctx, done := context.WithTrace(ctx) defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { @@ -141,29 +141,29 @@ func (base *Base) Move(sourcePath string, destPath string) error { return storagedriver.InvalidPathError{Path: destPath} } - return base.StorageDriver.Move(sourcePath, destPath) + return base.StorageDriver.Move(ctx, sourcePath, destPath) } // Delete wraps Delete of underlying storage driver. -func (base *Base) Delete(path string) error { - _, done := context.WithTrace(context.Background()) +func (base *Base) Delete(ctx context.Context, path string) error { + ctx, done := context.WithTrace(ctx) defer done("%s.Delete(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.Delete(path) + return base.StorageDriver.Delete(ctx, path) } // URLFor wraps URLFor of underlying storage driver. -func (base *Base) URLFor(path string, options map[string]interface{}) (string, error) { - _, done := context.WithTrace(context.Background()) +func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + ctx, done := context.WithTrace(ctx) defer done("%s.URLFor(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path} } - return base.StorageDriver.URLFor(path, options) + return base.StorageDriver.URLFor(ctx, path, options) } diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index 9ffe0888..82960314 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -9,6 +9,7 @@ import ( "path" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -76,8 +77,8 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { - rc, err := d.ReadStream(path, 0) +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } @@ -92,8 +93,8 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(subPath string, contents []byte) error { - if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil { +func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { + if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { return err } @@ -102,7 +103,7 @@ func (d *driver) PutContent(subPath string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
-func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { @@ -126,7 +127,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.Reader at a location // designated by the given path. -func (d *driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) { +func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { // TODO(stevvooe): This needs to be a requirement. // if !path.IsAbs(subPath) { // return fmt.Errorf("absolute path required: %q", subPath) @@ -162,7 +163,7 @@ func (d *driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. -func (d *driver) Stat(subPath string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { fullPath := d.fullPath(subPath) fi, err := os.Stat(fullPath) @@ -182,7 +183,7 @@ func (d *driver) Stat(subPath string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. -func (d *driver) List(subPath string) ([]string, error) { +func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { if subPath[len(subPath)-1] != '/' { subPath += "/" } @@ -213,7 +214,7 @@ func (d *driver) List(subPath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. -func (d *driver) Move(sourcePath string, destPath string) error { +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { source := d.fullPath(sourcePath) dest := d.fullPath(destPath) @@ -230,7 +231,7 @@ func (d *driver) Move(sourcePath string, destPath string) error { } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(subPath string) error { +func (d *driver) Delete(ctx context.Context, subPath string) error { fullPath := d.fullPath(subPath) _, err := os.Stat(fullPath) @@ -246,7 +247,7 @@ func (d *driver) Delete(subPath string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod } diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go index e0694de2..2d121e1c 100644 --- a/docs/storage/driver/inmemory/driver.go +++ b/docs/storage/driver/inmemory/driver.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -69,11 +70,11 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. 
-func (d *driver) GetContent(path string) ([]byte, error) { +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() - rc, err := d.ReadStream(path, 0) + rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(p string, contents []byte) error { +func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -102,7 +103,7 @@ func (d *driver) PutContent(p string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -126,7 +127,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. -func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { d.mutex.Lock() defer d.mutex.Unlock() @@ -167,7 +168,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (nn in } // Stat returns info about the provided path. -func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -193,7 +194,7 @@ func (d *driver) Stat(path string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. -func (d *driver) List(path string) ([]string, error) { +func (d *driver) List(ctx context.Context, path string) ([]string, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -223,7 +224,7 @@ func (d *driver) List(path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. -func (d *driver) Move(sourcePath string, destPath string) error { +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -239,7 +240,7 @@ func (d *driver) Move(sourcePath string, destPath string) error { } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(path string) error { +func (d *driver) Delete(ctx context.Context, path string) error { d.mutex.Lock() defer d.mutex.Unlock() @@ -256,6 +257,6 @@ func (d *driver) Delete(path string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. 
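// With this change the calling convention for every driver method becomes,
// in sketch form (the paths and data here are illustrative):
//
//	ctx := context.Background() // or a request-scoped context from the app
//	data, err := driver.GetContent(ctx, "/some/path")
//	if err == nil {
//		err = driver.PutContent(ctx, "/another/path", data)
//	}
//
// so the base wrappers can record their traces against the context the
// caller supplied instead of a fresh Background context.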
-func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod } diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go index aee068a5..31c00afc 100644 --- a/docs/storage/driver/middleware/cloudfront/middleware.go +++ b/docs/storage/driver/middleware/cloudfront/middleware.go @@ -98,12 +98,12 @@ type S3BucketKeyer interface { // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. -func (lh *cloudFrontStorageMiddleware) URLFor(path string, options map[string]interface{}) (string, error) { +func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { // TODO(endophage): currently only supports S3 keyer, ok := lh.StorageDriver.(S3BucketKeyer) if !ok { - context.GetLogger(context.Background()).Warn("the CloudFront middleware does not support this backend storage driver") - return lh.StorageDriver.URLFor(path, options) + context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") + return lh.StorageDriver.URLFor(ctx, path, options) } cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index fe23262e..f6e7900e 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -29,6 +29,8 @@ import ( "github.com/AdRoll/goamz/aws" "github.com/AdRoll/goamz/s3" "github.com/Sirupsen/logrus" + + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -267,7 +269,7 @@ func (d *driver) Name() string { } // GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(path string) ([]byte, error) { +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Bucket.Get(d.s3Path(path)) if err != nil { return nil, parseError(path, err) @@ -276,13 +278,13 @@ func (d *driver) GetContent(path string) ([]byte, error) { } // PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(path string, contents []byte) error { +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") @@ -304,7 +306,7 @@ func (d *driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // returned. May be used to resume writing a stream by providing a nonzero // offset. Offsets past the current size will write from the position // beyond the end of the file. 
-func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (totalRead int64, err error) {
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
 	partNumber := 1
 	bytesRead := 0
 	var putErrChan chan error
@@ -348,7 +350,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
 
 	// Fills from 0 to total from current
 	fromSmallCurrent := func(total int64) error {
-		current, err := d.ReadStream(path, 0)
+		current, err := d.ReadStream(ctx, path, 0)
 		if err != nil {
 			return err
 		}
@@ -628,7 +630,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total
 
 // Stat retrieves the FileInfo for the given path, including the current size
 // in bytes and the creation time.
-func (d *driver) Stat(path string) (storagedriver.FileInfo, error) {
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
 	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1)
 	if err != nil {
 		return nil, err
@@ -661,7 +663,7 @@ func (d *driver) Stat(path string) (storagedriver.FileInfo, error) {
 }
 
 // List returns a list of the objects that are direct descendants of the given path.
-func (d *driver) List(path string) ([]string, error) {
+func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 	if path != "/" && path[len(path)-1] != '/' {
 		path = path + "/"
 	}
@@ -706,7 +708,7 @@ func (d *driver) List(path string) ([]string, error) {
 
 // Move moves an object stored at sourcePath to destPath, removing the original
 // object.
-func (d *driver) Move(sourcePath string, destPath string) error {
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
 	/* This is terrible, but aws doesn't have an actual move. */
 	_, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath))
@@ -714,11 +716,11 @@ func (d *driver) Move(sourcePath string, destPath string) error {
 		return parseError(sourcePath, err)
 	}
 
-	return d.Delete(sourcePath)
+	return d.Delete(ctx, sourcePath)
 }
 
 // Delete recursively deletes all objects stored at "path" and its subpaths.
-func (d *driver) Delete(path string) error {
+func (d *driver) Delete(ctx context.Context, path string) error {
 	listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax)
 	if err != nil || len(listResponse.Contents) == 0 {
 		return storagedriver.PathNotFoundError{Path: path}
@@ -747,7 +749,7 @@ func (d *driver) Delete(path string) error {
 
 // URLFor returns a URL which may be used to retrieve the content stored at the given path.
 // May return an ErrUnsupportedMethod in certain StorageDriver implementations.
-func (d *driver) URLFor(path string, options map[string]interface{}) (string, error) {
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
 	methodString := "GET"
 	method, ok := options["method"]
 	if ok {
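The WriteStream comment above spells out the resume semantics: a write may continue at any offset up to the current size. A sketch of that pattern written once against the generic interface; resumeWrite is a hypothetical helper for illustration, not part of this patch.

    package driverutil

    import (
    	"bytes"

    	"github.com/docker/distribution/context"
    	storagedriver "github.com/docker/distribution/registry/storage/driver"
    )

    // resumeWrite appends rest at the current end of path, as reported by
    // Stat, forwarding the same ctx to both calls.
    func resumeWrite(ctx context.Context, d storagedriver.StorageDriver, path string, rest []byte) (int64, error) {
    	fi, err := d.Stat(ctx, path)
    	if err != nil {
    		return 0, err
    	}
    	return d.WriteStream(ctx, path, fi.Size(), bytes.NewReader(rest))
    }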
diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go
index 69543bcb..c608e454 100644
--- a/docs/storage/driver/s3/s3_test.go
+++ b/docs/storage/driver/s3/s3_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/AdRoll/goamz/aws"
+	"github.com/docker/distribution/context"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 	"github.com/docker/distribution/registry/storage/driver/testsuites"
 
@@ -134,16 +135,17 @@ func (suite *S3DriverSuite) TestEmptyRootList(c *check.C) {
 	filename := "/test"
 	contents := []byte("contents")
-	err = rootedDriver.PutContent(filename, contents)
+	ctx := context.Background()
+	err = rootedDriver.PutContent(ctx, filename, contents)
 	c.Assert(err, check.IsNil)
-	defer rootedDriver.Delete(filename)
+	defer rootedDriver.Delete(ctx, filename)
 
-	keys, err := emptyRootDriver.List("/")
+	keys, err := emptyRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true)
 	}
 
-	keys, err = slashRootDriver.List("/")
+	keys, err = slashRootDriver.List(ctx, "/")
 	for _, path := range keys {
 		c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true)
 	}
diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go
index cda1c37d..bade099f 100644
--- a/docs/storage/driver/storagedriver.go
+++ b/docs/storage/driver/storagedriver.go
@@ -7,6 +7,8 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+
+	"github.com/docker/distribution/context"
 )
 
 // Version is a string representing the storage driver version, of the form
@@ -42,45 +44,45 @@ type StorageDriver interface {
 	// GetContent retrieves the content stored at "path" as a []byte.
 	// This should primarily be used for small objects.
-	GetContent(path string) ([]byte, error)
+	GetContent(ctx context.Context, path string) ([]byte, error)
 
 	// PutContent stores the []byte content at a location designated by "path".
 	// This should primarily be used for small objects.
-	PutContent(path string, content []byte) error
+	PutContent(ctx context.Context, path string, content []byte) error
 
 	// ReadStream retrieves an io.ReadCloser for the content stored at "path"
 	// with a given byte offset.
 	// May be used to resume reading a stream by providing a nonzero offset.
-	ReadStream(path string, offset int64) (io.ReadCloser, error)
+	ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
 
 	// WriteStream stores the contents of the provided io.ReadCloser at a
 	// location designated by the given path.
 	// May be used to resume writing a stream by providing a nonzero offset.
 	// The offset must be no larger than the CurrentSize for this path.
-	WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error)
+	WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error)
 
 	// Stat retrieves the FileInfo for the given path, including the current
 	// size in bytes and the creation time.
-	Stat(path string) (FileInfo, error)
+	Stat(ctx context.Context, path string) (FileInfo, error)
 
 	// List returns a list of the objects that are direct descendants of the
 	// given path.
-	List(path string) ([]string, error)
+	List(ctx context.Context, path string) ([]string, error)
 
 	// Move moves an object stored at sourcePath to destPath, removing the
 	// original object.
 	// Note: This may be no more efficient than a copy followed by a delete for
 	// many implementations.
-	Move(sourcePath string, destPath string) error
+	Move(ctx context.Context, sourcePath string, destPath string) error
 
 	// Delete recursively deletes all objects stored at "path" and its subpaths.
-	Delete(path string) error
+	Delete(ctx context.Context, path string) error
 
 	// URLFor returns a URL which may be used to retrieve the content stored at
 	// the given path, possibly using the given options.
 	// May return an ErrUnsupportedMethod in certain StorageDriver
 	// implementations.
-	URLFor(path string, options map[string]interface{}) (string, error)
+	URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
 }
 
 // PathRegexp is the regular expression which each file path must match. A
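Because every method of StorageDriver now threads a context, cross-cutting helpers can be written once against the interface. A minimal sketch; copyPath is a hypothetical helper, not part of this patch.

    package driverutil

    import (
    	"github.com/docker/distribution/context"
    	storagedriver "github.com/docker/distribution/registry/storage/driver"
    )

    // copyPath streams the content at srcPath into destPath, passing one
    // context through both halves of the transfer.
    func copyPath(ctx context.Context, d storagedriver.StorageDriver, srcPath, destPath string) (int64, error) {
    	rc, err := d.ReadStream(ctx, srcPath, 0)
    	if err != nil {
    		return 0, err
    	}
    	defer rc.Close()

    	return d.WriteStream(ctx, destPath, 0, rc)
    }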
diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
index 9f387a62..9185ebbc 100644
--- a/docs/storage/driver/testsuites/testsuites.go
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -14,6 +14,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/docker/distribution/context"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 
 	"gopkg.in/check.v1"
 )
@@ -27,6 +28,7 @@ func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipC
 	check.Suite(&DriverSuite{
 		Constructor: driverConstructor,
 		SkipCheck:   skipCheck,
+		ctx:         context.Background(),
 	})
 }
@@ -88,6 +90,7 @@ type DriverSuite struct {
 	Teardown DriverTeardown
 	SkipCheck
 	storagedriver.StorageDriver
+	ctx context.Context
 }
 
 // SetUpSuite sets up the gocheck test suite.
@@ -112,7 +115,7 @@ func (suite *DriverSuite) TearDownSuite(c *check.C) {
 // This causes the suite to abort if any files are left around in the storage
 // driver.
 func (suite *DriverSuite) TearDownTest(c *check.C) {
-	files, _ := suite.StorageDriver.List("/")
+	files, _ := suite.StorageDriver.List(suite.ctx, "/")
 	if len(files) > 0 {
 		c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files)
 	}
@@ -141,11 +144,11 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) {
 		"/Abc/Cba"}
 
 	for _, filename := range validFiles {
-		err := suite.StorageDriver.PutContent(filename, contents)
-		defer suite.StorageDriver.Delete(firstPart(filename))
+		err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+		defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 		c.Assert(err, check.IsNil)
 
-		received, err := suite.StorageDriver.GetContent(filename)
+		received, err := suite.StorageDriver.GetContent(suite.ctx, filename)
 		c.Assert(err, check.IsNil)
 		c.Assert(received, check.DeepEquals, contents)
 	}
@@ -164,12 +167,12 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
 		"/abc_123/"}
 
 	for _, filename := range invalidFiles {
-		err := suite.StorageDriver.PutContent(filename, contents)
-		defer suite.StorageDriver.Delete(firstPart(filename))
+		err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
+		defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 		c.Assert(err, check.NotNil)
 		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
 
-		_, err = suite.StorageDriver.GetContent(filename)
+		_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
 		c.Assert(err, check.NotNil)
 		c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{})
 	}
@@ -225,7 +228,7 @@ func (suite *DriverSuite) TestTruncate(c *check.C) {
 
 // TestReadNonexistent tests reading content from an empty path.
 func (suite *DriverSuite) TestReadNonexistent(c *check.C) {
 	filename := randomPath(32)
-	_, err := suite.StorageDriver.GetContent(filename)
+	_, err := suite.StorageDriver.GetContent(suite.ctx, filename)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
@@ -277,17 +280,17 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
 	}
 
 	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
 	checksum := sha1.New()
 	var fileSize int64 = 5 * 1024 * 1024 * 1024
 
 	contents := newRandReader(fileSize)
-	written, err := suite.StorageDriver.WriteStream(filename, 0, io.TeeReader(contents, checksum))
+	written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum))
 	c.Assert(err, check.IsNil)
 	c.Assert(written, check.Equals, fileSize)
 
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.IsNil)
 
 	writtenChecksum := sha1.New()
@@ -300,7 +303,7 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
 // reading with a given offset.
 func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
 	chunkSize := int64(32)
 
@@ -308,10 +311,10 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 	contentsChunk2 := randomContents(chunkSize)
 	contentsChunk3 := randomContents(chunkSize)
 
-	err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
 	c.Assert(err, check.IsNil)
 
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
 
@@ -320,7 +323,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 
 	c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...))
 
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
 
@@ -329,7 +332,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 
 	c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...))
 
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*2)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
 
@@ -338,7 +341,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 
 	c.Assert(readContents, check.DeepEquals, contentsChunk3)
 
 	// Ensure we get invalid offset for negative offsets.
-	reader, err = suite.StorageDriver.ReadStream(filename, -1)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1)
 	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
 	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
 	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
@@ -346,7 +349,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 
 	// Read past the end of the content and make sure we get a reader that
 	// returns 0 bytes and io.EOF
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
 
@@ -356,7 +359,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 	c.Assert(n, check.Equals, 0)
 
 	// Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
-	reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3-1)
+	reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
 
@@ -389,7 +392,7 @@ func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) {
 
 func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) {
 	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
 	contentsChunk1 := randomContents(chunkSize)
 	contentsChunk2 := randomContents(chunkSize)
@@ -399,39 +402,39 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
 
 	fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)
 
-	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contentsChunk1))
+	nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1))
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, int64(len(contentsChunk1)))
 
-	fi, err := suite.StorageDriver.Stat(filename)
+	fi, err := suite.StorageDriver.Stat(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1)))
 
-	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2))
+	nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2))
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
 
-	fi, err = suite.StorageDriver.Stat(filename)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
 
 	// Test re-writing the last chunk
-	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2))
+	nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2))
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
 
-	fi, err = suite.StorageDriver.Stat(filename)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
 
-	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():]))
+	nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():]))
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():])))
 
-	received, err := suite.StorageDriver.GetContent(filename)
+	received, err := suite.StorageDriver.GetContent(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(received, check.DeepEquals, fullContents)
 
@@ -443,16 +446,16 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
 	fullContents = append(fullContents, zeroChunk...)
 	fullContents = append(fullContents, contentsChunk4...)
-	nn, err = suite.StorageDriver.WriteStream(filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4))
+	nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4))
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, chunkSize)
 
-	fi, err = suite.StorageDriver.Stat(filename)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	c.Assert(fi.Size(), check.Equals, int64(len(fullContents)))
 
-	received, err = suite.StorageDriver.GetContent(filename)
+	received, err = suite.StorageDriver.GetContent(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(len(received), check.Equals, len(fullContents))
 	c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk)
@@ -460,7 +463,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
 	c.Assert(received, check.DeepEquals, fullContents)
 
 	// Ensure that negative offsets return correct error.
-	nn, err = suite.StorageDriver.WriteStream(filename, -1, bytes.NewReader(zeroChunk))
+	nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
 	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
@@ -472,11 +475,11 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
 func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) {
 	filename := randomPath(32)
 
-	_, err := suite.StorageDriver.ReadStream(filename, 0)
+	_, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 
-	_, err = suite.StorageDriver.ReadStream(filename, 64)
+	_, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
@@ -484,27 +487,27 @@
 // TestList checks the returned list of keys after populating a directory tree.
 func (suite *DriverSuite) TestList(c *check.C) {
 	rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8)))
-	defer suite.StorageDriver.Delete(rootDirectory)
+	defer suite.StorageDriver.Delete(suite.ctx, rootDirectory)
 
 	parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8)))
 	childFiles := make([]string, 50)
 	for i := 0; i < len(childFiles); i++ {
 		childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8)))
 		childFiles[i] = childFile
-		err := suite.StorageDriver.PutContent(childFile, randomContents(32))
+		err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32))
 		c.Assert(err, check.IsNil)
 	}
 	sort.Strings(childFiles)
 
-	keys, err := suite.StorageDriver.List("/")
+	keys, err := suite.StorageDriver.List(suite.ctx, "/")
 	c.Assert(err, check.IsNil)
 	c.Assert(keys, check.DeepEquals, []string{rootDirectory})
 
-	keys, err = suite.StorageDriver.List(rootDirectory)
+	keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory)
 	c.Assert(err, check.IsNil)
 	c.Assert(keys, check.DeepEquals, []string{parentDirectory})
 
-	keys, err = suite.StorageDriver.List(parentDirectory)
+	keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory)
 	c.Assert(err, check.IsNil)
 
 	sort.Strings(keys)
@@ -523,20 +526,20 @@ func (suite *DriverSuite) TestMove(c *check.C) {
 	sourcePath := randomPath(32)
 	destPath := randomPath(32)
 
-	defer suite.StorageDriver.Delete(firstPart(sourcePath))
-	defer suite.StorageDriver.Delete(firstPart(destPath))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath))
 
-	err := suite.StorageDriver.PutContent(sourcePath, contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.Move(sourcePath, destPath)
+	err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath)
 	c.Assert(err, check.IsNil)
 
-	received, err := suite.StorageDriver.GetContent(destPath)
+	received, err := suite.StorageDriver.GetContent(suite.ctx, destPath)
 	c.Assert(err, check.IsNil)
 	c.Assert(received, check.DeepEquals, contents)
 
-	_, err = suite.StorageDriver.GetContent(sourcePath)
+	_, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
@@ -549,23 +552,23 @@ func (suite *DriverSuite) TestMoveOverwrite(c *check.C) {
 	sourceContents := randomContents(32)
 	destContents := randomContents(64)
 
-	defer suite.StorageDriver.Delete(firstPart(sourcePath))
-	defer suite.StorageDriver.Delete(firstPart(destPath))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath))
 
-	err := suite.StorageDriver.PutContent(sourcePath, sourceContents)
+	err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.PutContent(destPath, destContents)
+	err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.Move(sourcePath, destPath)
+	err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath)
 	c.Assert(err, check.IsNil)
 
-	received, err := suite.StorageDriver.GetContent(destPath)
+	received, err := suite.StorageDriver.GetContent(suite.ctx, destPath)
 	c.Assert(err, check.IsNil)
 	c.Assert(received, check.DeepEquals, sourceContents)
 
-	_, err = suite.StorageDriver.GetContent(sourcePath)
+	_, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
@@ -577,16 +580,16 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) {
 	sourcePath := randomPath(32)
 	destPath := randomPath(32)
 
-	defer suite.StorageDriver.Delete(firstPart(destPath))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath))
 
-	err := suite.StorageDriver.PutContent(destPath, contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.Move(sourcePath, destPath)
+	err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 
-	received, err := suite.StorageDriver.GetContent(destPath)
+	received, err := suite.StorageDriver.GetContent(suite.ctx, destPath)
 	c.Assert(err, check.IsNil)
 	c.Assert(received, check.DeepEquals, contents)
 }
 
@@ -596,12 +599,12 @@ func (suite *DriverSuite) TestMoveInvalid(c *check.C) {
 	contents := randomContents(32)
 
 	// Create a regular file.
-	err := suite.StorageDriver.PutContent("/notadir", contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents)
 	c.Assert(err, check.IsNil)
-	defer suite.StorageDriver.Delete("/notadir")
+	defer suite.StorageDriver.Delete(suite.ctx, "/notadir")
 
 	// Now try to move a non-existent file under it.
-	err = suite.StorageDriver.Move("/notadir/foo", "/notadir/bar")
+	err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar")
 	c.Assert(err, check.NotNil) // non-nil error
 }
 
@@ -611,15 +614,15 @@ func (suite *DriverSuite) TestDelete(c *check.C) {
 	filename := randomPath(32)
 	contents := randomContents(32)
 
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
-	err := suite.StorageDriver.PutContent(filename, contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.Delete(filename)
+	err = suite.StorageDriver.Delete(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 
-	_, err = suite.StorageDriver.GetContent(filename)
+	_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
@@ -630,12 +633,12 @@ func (suite *DriverSuite) TestURLFor(c *check.C) {
 	filename := randomPath(32)
 	contents := randomContents(32)
 
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
-	err := suite.StorageDriver.PutContent(filename, contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
 	c.Assert(err, check.IsNil)
 
-	url, err := suite.StorageDriver.URLFor(filename, nil)
+	url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil)
 	if err == storagedriver.ErrUnsupportedMethod {
 		return
 	}
@@ -649,7 +652,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) {
 	c.Assert(err, check.IsNil)
 	c.Assert(read, check.DeepEquals, contents)
 
-	url, err = suite.StorageDriver.URLFor(filename, map[string]interface{}{"method": "HEAD"})
+	url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"})
 	if err == storagedriver.ErrUnsupportedMethod {
 		return
 	}
@@ -663,7 +666,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) {
 
 // TestDeleteNonexistent checks that removing a nonexistent key fails.
 func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) {
 	filename := randomPath(32)
-	err := suite.StorageDriver.Delete(filename)
+	err := suite.StorageDriver.Delete(suite.ctx, filename)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
@@ -676,42 +679,42 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
 	filename3 := randomPath(32)
 	contents := randomContents(32)
 
-	defer suite.StorageDriver.Delete(firstPart(dirname))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname))
 
-	err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents)
+	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.PutContent(path.Join(dirname, filename3), contents)
+	err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents)
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.Delete(path.Join(dirname, filename1))
+	err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1))
 	c.Assert(err, check.IsNil)
 
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
 	c.Assert(err, check.IsNil)
 
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
 	c.Assert(err, check.IsNil)
 
-	err = suite.StorageDriver.Delete(dirname)
+	err = suite.StorageDriver.Delete(suite.ctx, dirname)
 	c.Assert(err, check.IsNil)
 
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 
-	_, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
+	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
@@ -723,24 +726,24 @@ func (suite *DriverSuite) TestStatCall(c *check.C) {
 	fileName := randomFilename(32)
 	filePath := path.Join(dirPath, fileName)
 
-	defer suite.StorageDriver.Delete(firstPart(dirPath))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath))
 
 	// Call on non-existent file/dir, check error.
-	fi, err := suite.StorageDriver.Stat(dirPath)
+	fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 	c.Assert(fi, check.IsNil)
 
-	fi, err = suite.StorageDriver.Stat(filePath)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 	c.Assert(fi, check.IsNil)
 
-	err = suite.StorageDriver.PutContent(filePath, content)
+	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
 	c.Assert(err, check.IsNil)
 
 	// Call on regular file, check results
-	fi, err = suite.StorageDriver.Stat(filePath)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	c.Assert(fi.Path(), check.Equals, filePath)
@@ -751,9 +754,9 @@ func (suite *DriverSuite) TestStatCall(c *check.C) {
 	// Sleep and modify the file
 	time.Sleep(time.Second * 10)
 	content = randomContents(4096)
-	err = suite.StorageDriver.PutContent(filePath, content)
+	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)
 	c.Assert(err, check.IsNil)
-	fi, err = suite.StorageDriver.Stat(filePath)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency)
@@ -768,7 +771,7 @@ func (suite *DriverSuite) TestStatCall(c *check.C) {
 	}
 
 	// Call on directory (do not check ModTime as dirs don't need to support it)
-	fi, err = suite.StorageDriver.Stat(dirPath)
+	fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath)
 	c.Assert(err, check.IsNil)
 	c.Assert(fi, check.NotNil)
 	c.Assert(fi.Path(), check.Equals, dirPath)
@@ -784,15 +787,15 @@ func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) {
 	filename := randomPath(32)
 	contents := randomContents(4096)
 
-	defer suite.StorageDriver.Delete(firstPart(filename))
-	err := suite.StorageDriver.PutContent(filename, contents)
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
 	c.Assert(err, check.IsNil)
 
 	contents = randomContents(2048) // upload a different, smaller file
-	err = suite.StorageDriver.PutContent(filename, contents)
+	err = suite.StorageDriver.PutContent(suite.ctx, filename, contents)
 	c.Assert(err, check.IsNil)
 
-	readContents, err := suite.StorageDriver.GetContent(filename)
+	readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 	c.Assert(readContents, check.DeepEquals, contents)
 }
@@ -810,9 +813,9 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) {
 	filename := randomPath(32)
 	contents := randomContents(filesize)
 
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
-	err := suite.StorageDriver.PutContent(filename, contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
 	c.Assert(err, check.IsNil)
 
 	var wg sync.WaitGroup
@@ -820,7 +823,7 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) {
 	readContents := func() {
 		defer wg.Done()
 		offset := rand.Int63n(int64(len(contents)))
-		reader, err := suite.StorageDriver.ReadStream(filename, offset)
+		reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
 		c.Assert(err, check.IsNil)
 
 		readContents, err := ioutil.ReadAll(reader)
@@ -872,7 +875,7 @@ func (suite *DriverSuite) TestEventualConsistency(c *check.C) {
 	}
 	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
 	var offset int64
 	var misswrites int
@@ -880,17 +883,17 @@
 	for i := 0; i < 1024; i++ {
 		contents := randomContents(chunkSize)
-		read, err := suite.StorageDriver.WriteStream(filename, offset, bytes.NewReader(contents))
+		read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents))
 		c.Assert(err, check.IsNil)
 
-		fi, err := suite.StorageDriver.Stat(filename)
+		fi, err := suite.StorageDriver.Stat(suite.ctx, filename)
 		c.Assert(err, check.IsNil)
 
 		// We are most concerned with being able to read data as soon as Stat declares
 		// it is uploaded. This is the strongest guarantee that some drivers (that guarantee
 		// at best eventual consistency) absolutely need to provide.
 		if fi.Size() == offset+chunkSize {
-			reader, err := suite.StorageDriver.ReadStream(filename, offset)
+			reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset)
 			c.Assert(err, check.IsNil)
 
 			readContents, err := ioutil.ReadAll(reader)
@@ -937,15 +940,15 @@ func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) {
 	parentDir := randomPath(8)
 	defer func() {
 		c.StopTimer()
-		suite.StorageDriver.Delete(firstPart(parentDir))
+		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
 	}()
 
 	for i := 0; i < c.N; i++ {
 		filename := path.Join(parentDir, randomPath(32))
-		err := suite.StorageDriver.PutContent(filename, randomContents(size))
+		err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size))
 		c.Assert(err, check.IsNil)
 
-		_, err = suite.StorageDriver.GetContent(filename)
+		_, err = suite.StorageDriver.GetContent(suite.ctx, filename)
 		c.Assert(err, check.IsNil)
 	}
 }
@@ -975,16 +978,16 @@ func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) {
 	parentDir := randomPath(8)
 	defer func() {
 		c.StopTimer()
-		suite.StorageDriver.Delete(firstPart(parentDir))
+		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
 	}()
 
 	for i := 0; i < c.N; i++ {
 		filename := path.Join(parentDir, randomPath(32))
-		written, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(randomContents(size)))
+		written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size)))
 		c.Assert(err, check.IsNil)
 		c.Assert(written, check.Equals, size)
 
-		rc, err := suite.StorageDriver.ReadStream(filename, 0)
+		rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 		c.Assert(err, check.IsNil)
 		rc.Close()
 	}
@@ -1004,17 +1007,17 @@ func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) {
 	parentDir := randomPath(8)
 	defer func() {
 		c.StopTimer()
-		suite.StorageDriver.Delete(firstPart(parentDir))
+		suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
 	}()
 
 	for i := int64(0); i < numFiles; i++ {
-		err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil)
+		err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil)
 		c.Assert(err, check.IsNil)
 	}
 
 	c.ResetTimer()
 	for i := 0; i < c.N; i++ {
-		files, err := suite.StorageDriver.List(parentDir)
+		files, err := suite.StorageDriver.List(suite.ctx, parentDir)
 		c.Assert(err, check.IsNil)
 		c.Assert(int64(len(files)), check.Equals, numFiles)
 	}
@@ -1033,17 +1036,17 @@ func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) {
 	for i := 0; i < c.N; i++ {
 		parentDir := randomPath(8)
-		defer suite.StorageDriver.Delete(firstPart(parentDir))
+		defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
 
 		c.StopTimer()
 		for j := int64(0); j < numFiles; j++ {
-			err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil)
+			err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil)
 			c.Assert(err, check.IsNil)
 		}
 		c.StartTimer()
 
 		// This is the operation we're benchmarking
-		err := suite.StorageDriver.Delete(firstPart(parentDir))
+		err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir))
 		c.Assert(err, check.IsNil)
 	}
 }
@@ -1055,7 +1058,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
 	defer tf.Close()
 
 	filename := randomPath(32)
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
 	contents := randomContents(size)
 
@@ -1065,11 +1068,11 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
 	tf.Sync()
 	tf.Seek(0, os.SEEK_SET)
 
-	nn, err := suite.StorageDriver.WriteStream(filename, 0, tf)
+	nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf)
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, size)
 
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
 
@@ -1080,25 +1083,25 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
 }
 
 func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) {
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
-	err := suite.StorageDriver.PutContent(filename, contents)
+	err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
 	c.Assert(err, check.IsNil)
 
-	readContents, err := suite.StorageDriver.GetContent(filename)
+	readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename)
 	c.Assert(err, check.IsNil)
 
 	c.Assert(readContents, check.DeepEquals, contents)
 }
 
 func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) {
-	defer suite.StorageDriver.Delete(firstPart(filename))
+	defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename))
 
-	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contents))
+	nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents))
 	c.Assert(err, check.IsNil)
 	c.Assert(nn, check.Equals, int64(len(contents)))
 
-	reader, err := suite.StorageDriver.ReadStream(filename, 0)
+	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.IsNil)
 	defer reader.Close()
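With the suite now owning a context seeded from context.Background(), an external driver package registers against the common tests without any context plumbing of its own. A sketch, assuming gocheck is wired up as the suite expects (NeverSkip is the stock no-op SkipCheck):

    package inmemory_test

    import (
    	"testing"

    	storagedriver "github.com/docker/distribution/registry/storage/driver"
    	"github.com/docker/distribution/registry/storage/driver/inmemory"
    	"github.com/docker/distribution/registry/storage/driver/testsuites"
    	"gopkg.in/check.v1"
    )

    // Test hooks gocheck into the standard go test runner.
    func Test(t *testing.T) { check.TestingT(t) }

    func init() {
    	constructor := func() (storagedriver.StorageDriver, error) {
    		return inmemory.New(), nil
    	}
    	testsuites.RegisterInProcessSuite(constructor, testsuites.NeverSkip)
    }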
diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go
index 65d4347f..72d58f8a 100644
--- a/docs/storage/filereader.go
+++ b/docs/storage/filereader.go
@@ -9,6 +9,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/docker/distribution/context"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 )
 
@@ -25,6 +26,8 @@ const fileReaderBufferSize = 4 << 20
 type fileReader struct {
 	driver storagedriver.StorageDriver
 
+	ctx context.Context
+
 	// identifying fields
 	path string
 	size int64 // size is the total size, must be set.
@@ -40,14 +43,15 @@ type fileReader struct {
 // newFileReader initializes a file reader for the remote file. The read takes
 // on the offset and size at the time the reader is created. If the underlying
 // file changes, one must create a new fileReader.
-func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) {
+func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string) (*fileReader, error) {
 	rd := &fileReader{
 		driver: driver,
 		path:   path,
+		ctx:    ctx,
 	}
 
 	// Grab the size of the layer file, ensuring existence.
-	if fi, err := driver.Stat(path); err != nil {
+	if fi, err := driver.Stat(ctx, path); err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError:
 			// NOTE(stevvooe): We really don't care if the file is not
@@ -141,7 +145,7 @@ func (fr *fileReader) reader() (io.Reader, error) {
 	}
 
 	// If we don't have a reader, open one up.
-	rc, err := fr.driver.ReadStream(fr.path, fr.offset)
+	rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset)
 	if err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError:
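Inside the storage package, every reader is now opened under the caller's context, which then flows into the driver's Stat (at open) and ReadStream (on first read). A sketch of the pattern; readAllAt is a hypothetical in-package helper, not part of this patch, and assumes the package's existing imports of context, ioutil, and storagedriver:

    // readAllAt opens a reader for path and drains it, threading ctx
    // through every underlying driver call. Illustrative only.
    func readAllAt(ctx context.Context, driver storagedriver.StorageDriver, path string) ([]byte, error) {
    	fr, err := newFileReader(ctx, driver, path)
    	if err != nil {
    		return nil, err
    	}
    	defer fr.Close()

    	return ioutil.ReadAll(fr)
    }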
diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go
index 8a077603..c48bf16d 100644
--- a/docs/storage/filereader_test.go
+++ b/docs/storage/filereader_test.go
@@ -8,12 +8,13 @@ import (
 	"os"
 	"testing"
 
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
-
 	"github.com/docker/distribution/registry/storage/driver/inmemory"
 )
 
 func TestSimpleRead(t *testing.T) {
+	ctx := context.Background()
 	content := make([]byte, 1<<20)
 	n, err := rand.Read(content)
 	if err != nil {
@@ -21,7 +22,7 @@ func TestSimpleRead(t *testing.T) {
 	}
 
 	if n != len(content) {
-		t.Fatalf("random read did't fill buffer")
+		t.Fatalf("random read didn't fill buffer")
 	}
 
 	dgst, err := digest.FromReader(bytes.NewReader(content))
@@ -32,11 +33,11 @@ func TestSimpleRead(t *testing.T) {
 	driver := inmemory.New()
 	path := "/random"
 
-	if err := driver.PutContent(path, content); err != nil {
+	if err := driver.PutContent(ctx, path, content); err != nil {
 		t.Fatalf("error putting patterned content: %v", err)
 	}
 
-	fr, err := newFileReader(driver, path)
+	fr, err := newFileReader(ctx, driver, path)
 	if err != nil {
 		t.Fatalf("error allocating file reader: %v", err)
 	}
@@ -59,12 +60,13 @@ func TestFileReaderSeek(t *testing.T) {
 	repititions := 1024
 	path := "/patterned"
 	content := bytes.Repeat([]byte(pattern), repititions)
+	ctx := context.Background()
 
-	if err := driver.PutContent(path, content); err != nil {
+	if err := driver.PutContent(ctx, path, content); err != nil {
 		t.Fatalf("error putting patterned content: %v", err)
 	}
 
-	fr, err := newFileReader(driver, path)
+	fr, err := newFileReader(ctx, driver, path)
 	if err != nil {
 		t.Fatalf("unexpected error creating file reader: %v", err)
 	}
@@ -160,7 +162,7 @@ func TestFileReaderSeek(t *testing.T) {
 // read method, with an io.EOF error.
 func TestFileReaderNonExistentFile(t *testing.T) {
 	driver := inmemory.New()
-	fr, err := newFileReader(driver, "/doesnotexist")
+	fr, err := newFileReader(context.Background(), driver, "/doesnotexist")
 	if err != nil {
 		t.Fatalf("unexpected error initializing reader: %v", err)
 	}
diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go
index 5f22142e..95930f1d 100644
--- a/docs/storage/filewriter.go
+++ b/docs/storage/filewriter.go
@@ -7,6 +7,7 @@ import (
 	"io"
 	"os"
 
+	"github.com/docker/distribution/context"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 )
 
@@ -18,6 +19,8 @@ const (
 type fileWriter struct {
 	driver storagedriver.StorageDriver
 
+	ctx context.Context
+
 	// identifying fields
 	path string
 
@@ -45,13 +48,14 @@ var _ fileWriterInterface = &fileWriter{}
 
 // newFileWriter returns a prepared fileWriter for the driver and path. This
 // could be considered similar to an "open" call on a regular filesystem.
-func newFileWriter(driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
+func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) {
 	fw := fileWriter{
 		driver: driver,
 		path:   path,
+		ctx:    ctx,
 	}
 
-	if fi, err := driver.Stat(path); err != nil {
+	if fi, err := driver.Stat(ctx, path); err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError:
 			// ignore, offset is zero
@@ -179,7 +183,7 @@ func (fw *fileWriter) readFromAt(r io.Reader, offset int64) (n int64, err error)
 		updateOffset = true
 	}
 
-	nn, err := fw.driver.WriteStream(fw.path, offset, r)
+	nn, err := fw.driver.WriteStream(fw.ctx, fw.path, offset, r)
 
 	if updateOffset {
 		// We should forward the offset, whether or not there was an error.
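The writer side mirrors the reader: newFileWriter captures ctx at open and reuses it for the initial Stat and every subsequent WriteStream. A sketch of in-package usage; putContentViaWriter is a hypothetical helper, not part of this patch, and assumes the buffered writer's Close flushes any remaining bytes:

    // putContentViaWriter writes p at the beginning of path under the
    // caller's context (a fresh fileWriter starts at offset zero).
    func putContentViaWriter(ctx context.Context, driver storagedriver.StorageDriver, path string, p []byte) (int, error) {
    	fw, err := newFileWriter(ctx, driver, path)
    	if err != nil {
    		return 0, err
    	}
    	defer fw.Close()

    	return fw.Write(p)
    }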
diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go
index a8ea6241..720e9385 100644
--- a/docs/storage/filewriter_test.go
+++ b/docs/storage/filewriter_test.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"testing"
 
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 	"github.com/docker/distribution/registry/storage/driver/inmemory"
@@ -32,8 +33,9 @@ func TestSimpleWrite(t *testing.T) {
 
 	driver := inmemory.New()
 	path := "/random"
+	ctx := context.Background()
 
-	fw, err := newFileWriter(driver, path)
+	fw, err := newFileWriter(ctx, driver, path)
 	if err != nil {
 		t.Fatalf("unexpected error creating fileWriter: %v", err)
 	}
@@ -49,7 +51,7 @@ func TestSimpleWrite(t *testing.T) {
 		t.Fatalf("unexpected write length: %d != %d", n, len(content))
 	}
 
-	fr, err := newFileReader(driver, path)
+	fr, err := newFileReader(ctx, driver, path)
 	if err != nil {
 		t.Fatalf("unexpected error creating fileReader: %v", err)
 	}
@@ -92,7 +94,7 @@ func TestSimpleWrite(t *testing.T) {
 		t.Fatalf("writeat was short: %d != %d", n, len(content))
 	}
 
-	fr, err = newFileReader(driver, path)
+	fr, err = newFileReader(ctx, driver, path)
 	if err != nil {
 		t.Fatalf("unexpected error creating fileReader: %v", err)
 	}
@@ -122,13 +124,13 @@ func TestSimpleWrite(t *testing.T) {
 	// Now, we copy from one path to another, running the data through the
 	// fileReader to fileWriter, rather than the driver.Move command to ensure
 	// everything is working correctly.
-	fr, err = newFileReader(driver, path)
+	fr, err = newFileReader(ctx, driver, path)
 	if err != nil {
 		t.Fatalf("unexpected error creating fileReader: %v", err)
 	}
 	defer fr.Close()
 
-	fw, err = newFileWriter(driver, "/copied")
+	fw, err = newFileWriter(ctx, driver, "/copied")
 	if err != nil {
 		t.Fatalf("unexpected error creating fileWriter: %v", err)
 	}
@@ -143,7 +145,7 @@ func TestSimpleWrite(t *testing.T) {
 		t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled))
 	}
 
-	fr, err = newFileReader(driver, "/copied")
+	fr, err = newFileReader(ctx, driver, "/copied")
 	if err != nil {
 		t.Fatalf("unexpected error creating fileReader: %v", err)
 	}
@@ -162,7 +164,8 @@ func TestSimpleWrite(t *testing.T) {
 }
 
 func TestBufferedFileWriter(t *testing.T) {
-	writer, err := newFileWriter(inmemory.New(), "/random")
+	ctx := context.Background()
+	writer, err := newFileWriter(ctx, inmemory.New(), "/random")
 
 	if err != nil {
 		t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error())
@@ -203,8 +206,8 @@ func BenchmarkFileWriter(b *testing.B) {
 			driver: inmemory.New(),
 			path:   "/random",
 		}
-
-		if fi, err := fw.driver.Stat(fw.path); err != nil {
+		ctx := context.Background()
+		if fi, err := fw.driver.Stat(ctx, fw.path); err != nil {
 			switch err := err.(type) {
 			case storagedriver.PathNotFoundError:
 				// ignore, offset is zero
@@ -236,8 +239,9 @@ func BenchmarkFileWriter(b *testing.B) {
 
 func BenchmarkBufferedFileWriter(b *testing.B) {
 	b.StopTimer() // not sure how long setup above will take
+	ctx := context.Background()
 	for i := 0; i < b.N; i++ {
-		bfw, err := newFileWriter(inmemory.New(), "/random")
+		bfw, err := newFileWriter(ctx, inmemory.New(), "/random")
 
 		if err != nil {
 			b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error())
diff --git a/docs/storage/layer_test.go b/docs/storage/layer_test.go
index f25018da..2ea99813 100644
--- a/docs/storage/layer_test.go
+++ b/docs/storage/layer_test.go
@@ -10,12 +10,12 @@ import (
 	"testing"
 
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/registry/storage/cache"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 	"github.com/docker/distribution/registry/storage/driver/inmemory"
 	"github.com/docker/distribution/testutil"
-	"golang.org/x/net/context"
 )
 
 // TestSimpleLayerUpload covers the layer upload process, exercising common
@@ -36,7 +36,7 @@ func TestSimpleLayerUpload(t *testing.T) {
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
-	registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache())
+	registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache())
 	repository, err := registry.Repository(ctx, imageName)
 	if err != nil {
 		t.Fatalf("unexpected error getting repo: %v", err)
@@ -144,7 +144,7 @@ func TestSimpleLayerRead(t *testing.T) {
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
-	registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache())
+	registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache())
 	repository, err := registry.Repository(ctx, imageName)
 	if err != nil {
 		t.Fatalf("unexpected error getting repo: %v", err)
@@ -253,7 +253,7 @@ func TestLayerUploadZeroLength(t *testing.T) {
 	ctx := context.Background()
 	imageName := "foo/bar"
 	driver := inmemory.New()
-	registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache())
+	registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache())
 	repository, err := registry.Repository(ctx, imageName)
 	if err != nil {
 		t.Fatalf("unexpected error getting repo: %v", err)
@@ -353,7 +353,8 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper,
 		digest: dgst,
 	})
 
-	if err := driver.PutContent(blobPath, p); err != nil {
+	ctx := context.Background()
+	if err := driver.PutContent(ctx, blobPath, p); err != nil {
 		return "", err
 	}
 
@@ -370,7 +371,7 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper,
 		return "", err
 	}
 
-	if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil {
+	if err := driver.PutContent(ctx, layerLinkPath, []byte(dgst)); err != nil {
 		return "", nil
 	}
 
diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go
index 40deba6a..ddca9741 100644
--- a/docs/storage/layerreader.go
+++ b/docs/storage/layerreader.go
@@ -54,7 +54,7 @@ func (lr *layerReader) Close() error {
 func (lr *layerReader) Handler(r *http.Request) (h http.Handler, err error) {
 	var handlerFunc http.HandlerFunc
 
-	redirectURL, err := lr.fileReader.driver.URLFor(lr.path, map[string]interface{}{"method": r.Method})
+	redirectURL, err := lr.fileReader.driver.URLFor(lr.ctx, lr.path, map[string]interface{}{"method": r.Method})
 
 	switch err {
 	case nil:
diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go
index a86b668f..8da14ac7 100644
--- a/docs/storage/layerstore.go
+++ b/docs/storage/layerstore.go
@@ -5,7 +5,7 @@ import (
 
 	"code.google.com/p/go-uuid/uuid"
 	"github.com/docker/distribution"
-	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
@@ -16,7 +16,7 @@ type layerStore struct {
 }
 
 func (ls *layerStore) Exists(digest digest.Digest) (bool, error) {
-	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists")
+	context.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists")
 
 	// Because this implementation just follows blob links, an existence check
 	// is pretty cheap by starting and closing a fetch.
@@ -35,13 +35,14 @@ func (ls *layerStore) Exists(digest digest.Digest) (bool, error) {
 }
 
 func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) {
-	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Fetch")
+	ctx := ls.repository.ctx
+	context.GetLogger(ctx).Debug("(*layerStore).Fetch")
 	bp, err := ls.path(dgst)
 	if err != nil {
 		return nil, err
 	}
 
-	fr, err := newFileReader(ls.repository.driver, bp)
+	fr, err := newFileReader(ctx, ls.repository.driver, bp)
 	if err != nil {
 		return nil, err
 	}
@@ -56,7 +57,8 @@ func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) {
 // is already in progress or the layer has already been uploaded, this
 // will return an error.
 func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
-	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Upload")
+	ctx := ls.repository.ctx
+	context.GetLogger(ctx).Debug("(*layerStore).Upload")
 
 	// NOTE(stevvooe): Consider the issues with allowing concurrent upload of
 	// the same two layers. Should it be disallowed? For now, we allow both
@@ -84,7 +86,7 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
 	}
 
 	// Write a startedat file for this upload
-	if err := ls.repository.driver.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
+	if err := ls.repository.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
 		return nil, err
 	}
 
@@ -94,7 +96,9 @@ func (ls *layerStore) Upload() (distribution.LayerUpload, error) {
 // Resume continues an in progress layer upload, returning the current
 // state of the upload.
 func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) {
-	ctxu.GetLogger(ls.repository.ctx).Debug("(*layerStore).Resume")
+	ctx := ls.repository.ctx
+	context.GetLogger(ctx).Debug("(*layerStore).Resume")
+
 	startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{
 		name: ls.repository.Name(),
 		uuid: uuid,
@@ -104,7 +108,7 @@ func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) {
 		return nil, err
 	}
 
-	startedAtBytes, err := ls.repository.driver.GetContent(startedAtPath)
+	startedAtBytes, err := ls.repository.driver.GetContent(ctx, startedAtPath)
 	if err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError:
@@ -133,7 +137,7 @@ func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) {
 
 // newLayerUpload allocates a new upload controller with the given state.
 func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) {
-	fw, err := newFileWriter(ls.repository.driver, path)
+	fw, err := newFileWriter(ls.repository.ctx, ls.repository.driver, path)
 	if err != nil {
 		return nil, err
 	}
diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go
index adf68ca9..a2672fe6 100644
--- a/docs/storage/layerwriter.go
+++ b/docs/storage/layerwriter.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
-	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 )
@@ -47,7 +47,7 @@ func (lw *layerWriter) StartedAt() time.Time {
 // contents of the uploaded layer. The checksum should be provided in the
 // format <algorithm>:<hex digest>.
 func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) {
-	ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish")
+	context.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish")
 
 	if err := lw.bufferedFileWriter.Close(); err != nil {
 		return nil, err
@@ -67,7 +67,7 @@ func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) {
 			break
 		}
 
-		ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries).
+		context.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries).
 			Errorf("error validating layer: %v", err)
 
 		if retries < 3 {
@@ -98,7 +98,7 @@ func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) {
 
 // Cancel the layer upload process.
 func (lw *layerWriter) Cancel() error {
-	ctxu.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Cancel")
+	context.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Cancel")
 	if err := lw.removeResources(); err != nil {
 		return err
 	}
@@ -168,7 +168,7 @@ func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) {
 		return nil, err
 	}
 
-	paths, err := lw.driver.List(uploadHashStatePathPrefix)
+	paths, err := lw.driver.List(lw.layerStore.repository.ctx, uploadHashStatePathPrefix)
 	if err != nil {
 		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
 			return nil, err
@@ -214,6 +214,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
 		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
 	}
 
+	ctx := lw.layerStore.repository.ctx
 	// Find the highest stored hashState with offset less than or equal to
 	// the requested offset.
 	for _, hashState := range hashStates {
@@ -229,7 +230,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
 			// is probably okay to skip for now since we don't expect anyone to
 			// use the API in this way. For that reason, we don't treat an
 			// error here as a fatal error, but only log it.
-			if err := lw.driver.Delete(hashState.path); err != nil {
+			if err := lw.driver.Delete(ctx, hashState.path); err != nil {
 				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
 			}
 		}
@@ -239,7 +240,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
 		// No need to load any state, just reset the hasher.
 		lw.resumableDigester.Reset()
 	} else {
-		storedState, err := lw.driver.GetContent(hashStateMatch.path)
+		storedState, err := lw.driver.GetContent(ctx, hashStateMatch.path)
 		if err != nil {
 			return err
 		}
@@ -251,9 +252,8 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
 
 	// Mind the gap.
 	if gapLen := offset - int64(lw.resumableDigester.Len()); gapLen > 0 {
-		// Need to read content from the upload to catch up to the desired
-		// offset.
-		fr, err := newFileReader(lw.driver, lw.path)
+		// Need to read content from the upload to catch up to the desired offset.
+		fr, err := newFileReader(ctx, lw.driver, lw.path)
 		if err != nil {
 			return err
 		}
@@ -286,7 +286,7 @@ func (lw *layerWriter) storeHashState() error {
 		return err
 	}
 
-	return lw.driver.PutContent(uploadHashStatePath, hashState)
+	return lw.driver.PutContent(lw.layerStore.repository.ctx, uploadHashStatePath, hashState)
 }
 
 // validateLayer checks the layer data against the digest, returning an error
@@ -329,7 +329,7 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error)
 	}
 
 	// Read the file from the backend driver and validate it.
-	fr, err := newFileReader(lw.bufferedFileWriter.driver, lw.path)
+	fr, err := newFileReader(lw.layerStore.repository.ctx, lw.bufferedFileWriter.driver, lw.path)
 	if err != nil {
 		return "", err
 	}
@@ -345,7 +345,7 @@ func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error)
 	}
 
 	if !verified {
-		ctxu.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst).
+		context.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst).
Errorf("canonical digest does match provided digest") return "", distribution.ErrLayerInvalidDigest{ Digest: dgst, @@ -368,8 +368,9 @@ func (lw *layerWriter) moveLayer(dgst digest.Digest) error { return err } + ctx := lw.layerStore.repository.ctx // Check for existence - if _, err := lw.driver.Stat(blobPath); err != nil { + if _, err := lw.driver.Stat(ctx, blobPath); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: break // ensure that it doesn't exist. @@ -388,7 +389,7 @@ func (lw *layerWriter) moveLayer(dgst digest.Digest) error { // the size here and write a zero-length file to blobPath if this is the // case. For the most part, this should only ever happen with zero-length // tars. - if _, err := lw.driver.Stat(lw.path); err != nil { + if _, err := lw.driver.Stat(ctx, lw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // HACK(stevvooe): This is slightly dangerous: if we verify above, @@ -397,7 +398,7 @@ func (lw *layerWriter) moveLayer(dgst digest.Digest) error { // prevent this horrid thing, we employ the hack of only allowing // to this happen for the zero tarsum. if dgst == digest.DigestSha256EmptyTar { - return lw.driver.PutContent(blobPath, []byte{}) + return lw.driver.PutContent(ctx, blobPath, []byte{}) } // We let this fail during the move below. @@ -409,7 +410,7 @@ func (lw *layerWriter) moveLayer(dgst digest.Digest) error { } } - return lw.driver.Move(lw.path, blobPath) + return lw.driver.Move(ctx, lw.path, blobPath) } // linkLayer links a valid, written layer blob into the registry under the @@ -435,7 +436,8 @@ func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Dige return err } - if err := lw.layerStore.repository.driver.PutContent(layerLinkPath, []byte(canonical)); err != nil { + ctx := lw.layerStore.repository.ctx + if err := lw.layerStore.repository.driver.PutContent(ctx, layerLinkPath, []byte(canonical)); err != nil { return err } } @@ -459,8 +461,7 @@ func (lw *layerWriter) removeResources() error { // Resolve and delete the containing directory, which should include any // upload related files. dirPath := path.Dir(dataPath) - - if err := lw.driver.Delete(dirPath); err != nil { + if err := lw.driver.Delete(lw.layerStore.repository.ctx, dirPath); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: break // already gone! diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index a70789d3..3bafb997 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -30,7 +30,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(driver, cache.NewInMemoryLayerInfoCache()) + registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()) repo, err := registry.Repository(ctx, name) if err != nil { diff --git a/docs/storage/purgeuploads.go b/docs/storage/purgeuploads.go index 13c468de..cf723070 100644 --- a/docs/storage/purgeuploads.go +++ b/docs/storage/purgeuploads.go @@ -7,6 +7,7 @@ import ( "code.google.com/p/go-uuid/uuid" log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" ) @@ -28,9 +29,9 @@ func newUploadData() uploadData { // PurgeUploads deletes files from the upload directory // created before olderThan. 
The list of files deleted and errors // encountered are returned -func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { +func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) - uploadData, errors := getOutstandingUploads(driver) + uploadData, errors := getOutstandingUploads(ctx, driver) var deleted []string for _, uploadData := range uploadData { if uploadData.startedAt.Before(olderThan) { @@ -38,7 +39,7 @@ func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actua log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", uploadData.containingDir, uploadData.startedAt, olderThan) if actuallyDelete { - err = driver.Delete(uploadData.containingDir) + err = driver.Delete(ctx, uploadData.containingDir) } if err == nil { deleted = append(deleted, uploadData.containingDir) @@ -56,7 +57,7 @@ func PurgeUploads(driver storageDriver.StorageDriver, olderThan time.Time, actua // which could be eligible for deletion. The only reliable way to // classify the age of a file is with the date stored in the startedAt // file, so gather files by UUID with a date from startedAt. -func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploadData, []error) { +func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { var errors []error uploads := make(map[string]uploadData, 0) @@ -65,7 +66,7 @@ func getOutstandingUploads(driver storageDriver.StorageDriver) (map[string]uploa if err != nil { return uploads, append(errors, err) } - err = Walk(driver, root, func(fileInfo storageDriver.FileInfo) error { + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file[0] == '_' { @@ -124,7 +125,8 @@ func uUIDFromPath(path string) (string, bool) { // readStartedAtFile reads the date from an upload's startedAtFile func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { - startedAtBytes, err := driver.GetContent(path) + // todo:(richardscothern) - pass in a context + startedAtBytes, err := driver.GetContent(context.Background(), path) if err != nil { return time.Now(), err } diff --git a/docs/storage/purgeuploads_test.go b/docs/storage/purgeuploads_test.go index 368e7c86..7c0f8813 100644 --- a/docs/storage/purgeuploads_test.go +++ b/docs/storage/purgeuploads_test.go @@ -7,26 +7,28 @@ import ( "time" "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) var pm = defaultPathMapper -func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) driver.StorageDriver { +func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { d := inmemory.New() + ctx := context.Background() for i := 0; i < numUploads; i++ { - addUploads(t, d, uuid.New(), repoName, startedAt) + addUploads(ctx, t, d, uuid.New(), repoName, startedAt) } - return d + return d, ctx } -func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { +func addUploads(ctx context.Context, t 
*testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } - if err := d.PutContent(dataPath, []byte("")); err != nil { + if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { t.Fatalf("Unable to write data file") } @@ -35,7 +37,7 @@ func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, sta t.Fatalf("Unable to resolve path") } - if d.PutContent(startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { t.Fatalf("Unable to write startedAt file") } @@ -43,8 +45,8 @@ func addUploads(t *testing.T, d driver.StorageDriver, uploadID, repo string, sta func TestPurgeGather(t *testing.T) { uploadCount := 5 - fs := testUploadFS(t, uploadCount, "test-repo", time.Now()) - uploadData, errs := getOutstandingUploads(fs) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) + uploadData, errs := getOutstandingUploads(ctx, fs) if len(errs) != 0 { t.Errorf("Unexepected errors: %q", errs) } @@ -54,9 +56,9 @@ func TestPurgeGather(t *testing.T) { } func TestPurgeNone(t *testing.T) { - fs := testUploadFS(t, 10, "test-repo", time.Now()) + fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) oneHourAgo := time.Now().Add(-1 * time.Hour) - deleted, errs := PurgeUploads(fs, oneHourAgo, true) + deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) if len(errs) != 0 { t.Error("Unexpected errors", errs) } @@ -68,13 +70,13 @@ func TestPurgeNone(t *testing.T) { func TestPurgeAll(t *testing.T) { uploadCount := 10 oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) + fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) // Ensure > 1 repos are purged - addUploads(t, fs, uuid.New(), "test-repo2", oneHourAgo) + addUploads(ctx, t, fs, uuid.New(), "test-repo2", oneHourAgo) uploadCount++ - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors:", errs) } @@ -88,15 +90,15 @@ func TestPurgeAll(t *testing.T) { func TestPurgeSome(t *testing.T) { oldUploadCount := 5 oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) + fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) newUploadCount := 4 for i := 0; i < newUploadCount; i++ { - addUploads(t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) + addUploads(ctx, t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) } - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors:", errs) } @@ -109,7 +111,7 @@ func TestPurgeSome(t *testing.T) { func TestPurgeOnlyUploads(t *testing.T) { oldUploadCount := 5 oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) + fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) // Create a directory tree outside _uploads and ensure // these files aren't deleted. 
@@ -123,11 +125,11 @@ func TestPurgeOnlyUploads(t *testing.T) { } nonUploadFile := path.Join(nonUploadPath, "file") - if err = fs.PutContent(nonUploadFile, []byte("")); err != nil { + if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { t.Fatalf("Unable to write data file") } - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors", errs) } @@ -140,13 +142,14 @@ func TestPurgeOnlyUploads(t *testing.T) { func TestPurgeMissingStartedAt(t *testing.T) { oneHourAgo := time.Now().Add(-1 * time.Hour) - fs := testUploadFS(t, 1, "test-repo", oneHourAgo) - err := Walk(fs, "/", func(fileInfo driver.FileInfo) error { + fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) + + err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file == "startedat" { - if err := fs.Delete(filePath); err != nil { + if err := fs.Delete(ctx, filePath); err != nil { t.Fatalf("Unable to delete startedat file: %s", filePath) } } @@ -155,7 +158,7 @@ func TestPurgeMissingStartedAt(t *testing.T) { if err != nil { t.Fatalf("Unexpected error during Walk: %s ", err.Error()) } - deleted, errs := PurgeUploads(fs, time.Now(), true) + deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) > 0 { t.Errorf("Unexpected errors") } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 1126db45..2834e5eb 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -20,10 +20,11 @@ type registry struct { // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate. -func NewRegistryWithDriver(driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { +func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { bs := &blobStore{ driver: driver, pm: defaultPathMapper, + ctx: ctx, } return ®istry{ diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go index ac605360..066ce972 100644 --- a/docs/storage/revisionstore.go +++ b/docs/storage/revisionstore.go @@ -26,7 +26,7 @@ func (rs *revisionStore) exists(revision digest.Digest) (bool, error) { return false, err } - exists, err := exists(rs.driver, revpath) + exists, err := exists(rs.repository.ctx, rs.driver, revpath) if err != nil { return false, err } @@ -121,7 +121,7 @@ func (rs *revisionStore) link(revision digest.Digest) error { return err } - if exists, err := exists(rs.driver, revisionPath); err != nil { + if exists, err := exists(rs.repository.ctx, rs.driver, revisionPath); err != nil { return err } else if exists { // Revision has already been linked! @@ -142,5 +142,5 @@ func (rs *revisionStore) delete(revision digest.Digest) error { return err } - return rs.driver.Delete(revisionPath) + return rs.driver.Delete(rs.repository.ctx, revisionPath) } diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index 7094b69e..fcf6224f 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -30,7 +30,7 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { // can be eliminated by implementing listAll on drivers. 
signaturesPath = path.Join(signaturesPath, "sha256") - signaturePaths, err := s.driver.List(signaturesPath) + signaturePaths, err := s.driver.List(s.repository.ctx, signaturesPath) if err != nil { return nil, err } diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index 616df952..882e6c35 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -4,6 +4,7 @@ import ( "path" "github.com/docker/distribution" + // "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -23,7 +24,7 @@ func (ts *tagStore) tags() ([]string, error) { } var tags []string - entries, err := ts.driver.List(p) + entries, err := ts.driver.List(ts.repository.ctx, p) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: @@ -52,7 +53,7 @@ func (ts *tagStore) exists(tag string) (bool, error) { return false, err } - exists, err := exists(ts.driver, tagPath) + exists, err := exists(ts.repository.ctx, ts.driver, tagPath) if err != nil { return false, err } @@ -102,7 +103,7 @@ func (ts *tagStore) resolve(tag string) (digest.Digest, error) { return "", err } - if exists, err := exists(ts.driver, currentPath); err != nil { + if exists, err := exists(ts.repository.ctx, ts.driver, currentPath); err != nil { return "", err } else if !exists { return "", distribution.ErrManifestUnknown{Name: ts.Name(), Tag: tag} @@ -130,7 +131,7 @@ func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) { // TODO(stevvooe): Need to append digest alg to get listing of revisions. manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256") - entries, err := ts.driver.List(manifestTagIndexPath) + entries, err := ts.driver.List(ts.repository.ctx, manifestTagIndexPath) if err != nil { return nil, err } @@ -154,5 +155,5 @@ func (ts *tagStore) delete(tag string) error { return err } - return ts.driver.Delete(tagPath) + return ts.driver.Delete(ts.repository.ctx, tagPath) } diff --git a/docs/storage/walk.go b/docs/storage/walk.go index 7b958d87..8290f167 100644 --- a/docs/storage/walk.go +++ b/docs/storage/walk.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" ) @@ -20,13 +21,13 @@ type WalkFn func(fileInfo storageDriver.FileInfo) error // Walk traverses a filesystem defined within driver, starting // from the given path, calling f on each file -func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error { - children, err := driver.List(from) +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) if err != nil { return err } for _, child := range children { - fileInfo, err := driver.Stat(child) + fileInfo, err := driver.Stat(ctx, child) if err != nil { return err } @@ -37,7 +38,7 @@ func Walk(driver storageDriver.StorageDriver, from string, f WalkFn) error { } if fileInfo.IsDir() && !skipDir { - Walk(driver, child, f) + Walk(ctx, driver, child, f) } } return nil diff --git a/docs/storage/walk_test.go b/docs/storage/walk_test.go index 22b91b35..40b8547c 100644 --- a/docs/storage/walk_test.go +++ b/docs/storage/walk_test.go @@ -4,17 +4,19 @@ import ( "fmt" "testing" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) -func testFS(t *testing.T) (driver.StorageDriver, 
map[string]string) { +func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { d := inmemory.New() c := []byte("") - if err := d.PutContent("/a/b/c/d", c); err != nil { + ctx := context.Background() + if err := d.PutContent(ctx, "/a/b/c/d", c); err != nil { t.Fatalf("Unable to put to inmemory fs") } - if err := d.PutContent("/a/b/c/e", c); err != nil { + if err := d.PutContent(ctx, "/a/b/c/e", c); err != nil { t.Fatalf("Unable to put to inmemory fs") } @@ -26,20 +28,20 @@ func testFS(t *testing.T) (driver.StorageDriver, map[string]string) { "/a/b/c/e": "file", } - return d, expected + return d, expected, ctx } func TestWalkErrors(t *testing.T) { - d, expected := testFS(t) + d, expected, ctx := testFS(t) fileCount := len(expected) - err := Walk(d, "", func(fileInfo driver.FileInfo) error { + err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { t.Error("Expected invalid root err") } - err = Walk(d, "/", func(fileInfo driver.FileInfo) error { + err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { // error on the 2nd file if fileInfo.Path() == "/a/b" { return fmt.Errorf("Early termination") @@ -54,7 +56,7 @@ func TestWalkErrors(t *testing.T) { t.Error(err.Error()) } - err = Walk(d, "/nonexistant", func(fileInfo driver.FileInfo) error { + err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { @@ -64,8 +66,8 @@ func TestWalkErrors(t *testing.T) { } func TestWalk(t *testing.T) { - d, expected := testFS(t) - err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + d, expected, ctx := testFS(t) + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() filetype, ok := expected[filePath] if !ok { @@ -93,8 +95,8 @@ func TestWalk(t *testing.T) { } func TestWalkSkipDir(t *testing.T) { - d, expected := testFS(t) - err := Walk(d, "/", func(fileInfo driver.FileInfo) error { + d, expected, ctx := testFS(t) + err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() if filePath == "/a/b" { // skip processing /a/b/c and /a/b/c/d From 80abf9fce0dbe43443d98d6efb42f03008866f1d Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 28 Apr 2015 14:06:24 -0700 Subject: [PATCH 099/501] Use done channel to avoid goroutine leak This deals with a memory leak, caused by goroutines, experienced when using the s3 driver. Unfortunately, this section of the code leaks goroutines like a sieve. There is probably some refactoring that could be done to avoid this but instead, we have a done channel that will cause waiting goroutines to exit. 
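To make the pattern concrete, here is a minimal, self-contained Go sketch of
the done-channel idea described above; the names are illustrative, not the s3
driver's own. A sender that would otherwise block forever on an abandoned,
unbuffered channel selects on done as well, so closing done releases any
waiting goroutine:

	package main

	import (
		"fmt"
		"time"
	)

	// work simulates a part-upload goroutine that reports its result on
	// an unbuffered channel, in the spirit of putErrChan in the driver.
	func work(results chan<- string, done <-chan struct{}) {
		time.Sleep(50 * time.Millisecond) // the "upload" finishes late
		select {
		case results <- "part uploaded": // receiver is still listening
		case <-done: // receiver has gone away; return instead of leaking
		}
	}

	func main() {
		results := make(chan string)
		done := make(chan struct{})

		go work(results, done)

		// The caller gives up without draining results; closing done is
		// the stopgap that frees the waiting goroutine.
		close(done)

		time.Sleep(100 * time.Millisecond)
		fmt.Println("worker exited; nothing left blocked on send")
	}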
Signed-off-by: Stephen J Day --- docs/storage/driver/s3/s3.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index fe23262e..57871b5d 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -310,6 +310,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total var putErrChan chan error parts := []s3.Part{} var part s3.Part + done := make(chan struct{}) // stopgap to free up waiting goroutines multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) if err != nil { @@ -344,6 +345,7 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total } d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines }() // Fills from 0 to total from current @@ -407,7 +409,11 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total // the underlying s3 library should handle it, it doesn't seem to // be part of the shouldRetry function (see AdRoll/goamz/s3). defer func() { - putErrChan <- nil // for some reason, we do this no matter what. + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } }() if bytesRead <= 0 { @@ -449,7 +455,11 @@ func (d *driver) WriteStream(path string, offset int64, reader io.Reader) (total if err != nil { logrus.Errorf("error putting part, aborting: %v", err) - putErrChan <- err + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } } // parts and partNumber are safe, because this function is the From 6fbda8fa2690a15b09a51f68b00516ace337bacf Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 1 May 2015 17:13:11 -0700 Subject: [PATCH 100/501] Update API spec to reference digest instead of tarsum Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/api/v2/descriptors.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 833bff8b..0baa5ee7 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -135,7 +135,7 @@ const ( "tag": , "fsLayers": [ { - "blobSum": + "blobSum": "" }, ... ] @@ -606,7 +606,7 @@ var routeDescriptors = []RouteDescriptor{ "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { - "digest": + "digest": "" } }, ... @@ -712,7 +712,7 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameBlob, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", - Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.", + Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest.", Methods: []MethodDescriptor{ { @@ -898,7 +898,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: "digest", Type: "query", - Format: "", + Format: "", Regexp: digest.DigestRegexp, Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, }, @@ -1173,7 +1173,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: "digest", Type: "string", - Format: "", + Format: "", Regexp: digest.DigestRegexp, Required: true, Description: `Digest of uploaded blob.`, From 7f3a57fdbb3fa63f5428a1f9b5cb9a60541ad84e Mon Sep 17 00:00:00 2001 From: Richard Date: Tue, 5 May 2015 14:21:33 -0700 Subject: [PATCH 101/501] Ensure the instrumentedResponseWriter correctly sets the http status in the context. Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index ab8187c1..3dd7e6ec 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -93,7 +93,7 @@ func TestURLPrefix(t *testing.T) { } -// TestLayerAPI conducts a full of the of the layer api. +// TestLayerAPI conducts a full test of the of the layer api. func TestLayerAPI(t *testing.T) { // TODO(stevvooe): This test code is complete junk but it should cover the // complete flow. This must be broken down and checked against the @@ -246,6 +246,16 @@ func TestLayerAPI(t *testing.T) { t.Fatalf("response body did not pass verification") } + // ---------------- + // Fetch the layer with an invalid digest + badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) + resp, err = http.Get(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) + // Missing tests: // - Upload the same tarsum file under and different repository and // ensure the content remains uncorrupted. From 123546212c513cfd2651b52ef7bee73c5e85ee1d Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 4 May 2015 08:56:37 -0700 Subject: [PATCH 102/501] Modify blob upload API - Ensures new uploads and resumed upload statuses always return an offset of 0. This allows future clients which support resumable upload to not attempt resumable upload on this version which does not support it. - Add PATCH support for streaming data on upload. - Add messaging to specification that PATCH with content range is currently not supported. - Update PUT blob to only support full data or no data, no more last chunk messaging as it was not supported. 
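As a rough illustration of the client side of these changes, the following
sketch streams a whole blob in a single PATCH and then completes the upload
with a bodyless PUT carrying the digest query parameter. The upload URL, the
repository name, and the digest below are placeholders, not real values; a
real upload URL (including its _state parameter) comes from the Location
header returned when the upload is started.

	package main

	import (
		"bytes"
		"fmt"
		"net/http"
	)

	func main() {
		// Placeholder upload URL; use the Location header from
		// POST /v2/<name>/blobs/uploads/ in practice.
		uploadURL := "http://localhost:5000/v2/foo/bar/blobs/uploads/example-uuid"
		blob := []byte("layer data")

		// PATCH streams the data without completing the upload.
		req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(blob))
		if err != nil {
			panic(err)
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		fmt.Println("PATCH:", resp.Status) // handler tests expect 202 Accepted

		// PUT with no body and a digest parameter completes the upload.
		req, err = http.NewRequest("PUT", uploadURL+"?digest=sha256%3A...", nil) // placeholder digest
		if err != nil {
			panic(err)
		}
		resp, err = http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		fmt.Println("PUT:", resp.Status) // handler tests expect 201 Created
	}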
closes #470 Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/api/v2/descriptors.go | 106 ++++++++++++++++++++++++----------- docs/handlers/api_test.go | 76 +++++++++++++++++++++++++ docs/handlers/layerupload.go | 90 +++++++++++++++++++++-------- 3 files changed, 215 insertions(+), 57 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 0baa5ee7..d7c4a880 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -1055,7 +1055,74 @@ var routeDescriptors = []RouteDescriptor{ Description: "Upload a chunk of data for the specified upload.", Requests: []RequestDescriptor{ { - Description: "Upload a chunk of data to specified upload without completing the upload.", + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, @@ -1143,26 +1210,15 @@ var routeDescriptors = []RouteDescriptor{ Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", Requests: []RequestDescriptor{ { - // TODO(stevvooe): Break this down into three separate requests: - // 1. Complete an upload where all data has already been sent. - // 2. Complete an upload where the entire body is in the PUT. - // 3. Complete an upload where the final, partial chunk is the body. - - Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. 
A request without a body will just complete the upload with previously uploaded content.", + Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.", - }, { Name: "Content-Length", Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.", + Format: "", + Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.", }, }, PathParameters: []ParameterDescriptor{ @@ -1181,7 +1237,7 @@ var routeDescriptors = []RouteDescriptor{ }, Body: BodyDescriptor{ ContentType: "application/octet-stream", - Format: "", + Format: "", }, Successes: []ResponseDescriptor{ { @@ -1232,24 +1288,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - }, - }, }, }, }, diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 3dd7e6ec..1e31477f 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -209,6 +209,13 @@ func TestLayerAPI(t *testing.T) { uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + // ------------------------------------------ + // Now, push just a chunk + layerFile.Seek(0, 0) + + uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) + finishUpload(t, env.builder, imageName, uploadURLBase, dgst) // ------------------------ // Use a head request to see if the layer exists. 
resp, err = http.Head(layerURL) @@ -616,6 +623,75 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, return resp.Header.Get("Location") } +func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, dgst digest.Digest) string { + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) + + expectedLayerURL, err := ub.BuildBlobURL(name, dgst) + if err != nil { + t.Fatalf("error building expected layer url: %v", err) + } + + checkHeaders(t, resp, http.Header{ + "Location": []string{expectedLayerURL}, + "Content-Length": []string{"0"}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + return resp.Header.Get("Location") +} + +func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ + "_state": u.Query()["_state"], + }.Encode() + + uploadURL := u.String() + + digester := digest.NewCanonicalDigester() + + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester)) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := http.DefaultClient.Do(req) + + return resp, digester.Digest(), err +} + +func pushChunk(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { + resp, dgst, err := doPushChunk(t, uploadURLBase, body) + if err != nil { + t.Fatalf("unexpected error doing push layer request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "putting chunk", resp, http.StatusAccepted) + + if err != nil { + t.Fatalf("error generating sha256 digest of body") + } + + checkHeaders(t, resp, http.Header{ + "Range": []string{fmt.Sprintf("0-%d", length-1)}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location"), dgst +} + func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { if resp.StatusCode != expectedStatus { t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go index 5cfa4554..1591d98d 100644 --- a/docs/handlers/layerupload.go +++ b/docs/handlers/layerupload.go @@ -23,11 +23,10 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } handler := http.Handler(handlers.MethodHandler{ - "POST": http.HandlerFunc(luh.StartLayerUpload), - "GET": http.HandlerFunc(luh.GetUploadStatus), - "HEAD": http.HandlerFunc(luh.GetUploadStatus), - // TODO(stevvooe): Must implement patch support. 
- // "PATCH": http.HandlerFunc(luh.PutLayerChunk), + "POST": http.HandlerFunc(luh.StartLayerUpload), + "GET": http.HandlerFunc(luh.GetUploadStatus), + "HEAD": http.HandlerFunc(luh.GetUploadStatus), + "PATCH": http.HandlerFunc(luh.PatchLayerData), "PUT": http.HandlerFunc(luh.PutLayerUploadComplete), "DELETE": http.HandlerFunc(luh.CancelLayerUpload), }) @@ -133,7 +132,7 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R luh.Upload = upload defer luh.Upload.Close() - if err := luh.layerUploadResponse(w, r); err != nil { + if err := luh.layerUploadResponse(w, r, true); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? luh.Errors.Push(v2.ErrorCodeUnknown, err) return @@ -151,7 +150,10 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re return } - if err := luh.layerUploadResponse(w, r); err != nil { + // TODO(dmcgowan): Set last argument to false in layerUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := luh.layerUploadResponse(w, r, true); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? luh.Errors.Push(v2.ErrorCodeUnknown, err) return @@ -161,11 +163,45 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re w.WriteHeader(http.StatusNoContent) } -// PutLayerUploadComplete takes the final request of a layer upload. The final -// chunk may include all the layer data, the final chunk of layer data or no -// layer data. Any data provided is received and verified. If successful, the -// layer is linked into the blob store and 201 Created is returned with the -// canonical url of the layer. +// PatchLayerData writes data to an upload. +func (luh *layerUploadHandler) PatchLayerData(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + w.WriteHeader(http.StatusBadRequest) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + // Copy the data + if _, err := io.Copy(luh.Upload, r.Body); err != nil { + ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + if err := luh.layerUploadResponse(w, r, false); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutLayerUploadComplete takes the final request of a layer upload. The +// request may include all the layer data or no layer data. Any data +// provided is received and verified. If successful, the layer is linked +// into the blob store and 201 Created is returned with the canonical +// url of the layer. func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) @@ -190,14 +226,11 @@ func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r * return } - // TODO(stevvooe): Check the incoming range header here, per the - // specification. LayerUpload should be seeked (sought?) to that position. 
- // TODO(stevvooe): Consider checking the error on this copy. // Theoretically, problems should be detected during verification but we // may miss a root cause. - // Read in the final chunk, if any. + // Read in the data, if any. if _, err := io.Copy(luh.Upload, r.Body); err != nil { ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err) w.WriteHeader(http.StatusInternalServerError) @@ -260,13 +293,19 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http. // layerUploadResponse provides a standard request for uploading layers and // chunk responses. This sets the correct headers but the response status is -// left to the caller. -func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error { +// left to the caller. The fresh argument is used to ensure that new layer +// uploads always start at a 0 offset. This allows disabling resumable push +// by always returning a 0 offset on check status. +func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - offset, err := luh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(luh).Errorf("unable get current offset of layer upload: %v", err) - return err + var offset int64 + if !fresh { + var err error + offset, err = luh.Upload.Seek(0, os.SEEK_CUR) + if err != nil { + ctxu.GetLogger(luh).Errorf("unable get current offset of layer upload: %v", err) + return err + } } // TODO(stevvooe): Need a better way to manage the upload state automatically. @@ -291,10 +330,15 @@ func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *htt return err } + endRange := offset + if endRange > 0 { + endRange = endRange - 1 + } + w.Header().Set("Docker-Upload-UUID", luh.UUID) w.Header().Set("Location", uploadURL) w.Header().Set("Content-Length", "0") - w.Header().Set("Range", fmt.Sprintf("0-%d", luh.State.Offset)) + w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) return nil } From b292f31d381c5de1882ff259c13c8bd605755bee Mon Sep 17 00:00:00 2001 From: Anton Tiurin Date: Tue, 5 May 2015 11:25:42 +0300 Subject: [PATCH 103/501] [Server] Listen and serve on a unix socket Allow to use a unix socket as a listener. To specify an endpoint type we use an optional configuration field 'net', as there's no way to distinguish a relative socket path from a hostname. Signed-off-by: Anton Tiurin --- docs/listener/listener.go | 74 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 docs/listener/listener.go diff --git a/docs/listener/listener.go b/docs/listener/listener.go new file mode 100644 index 00000000..b93a7a63 --- /dev/null +++ b/docs/listener/listener.go @@ -0,0 +1,74 @@ +package listener + +import ( + "fmt" + "net" + "os" + "time" +) + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// it is a plain copy-paste from net/http/server.go +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +// NewListener announces on laddr and net. 
Accepted values of the net are +// 'unix' and 'tcp' +func NewListener(net, laddr string) (net.Listener, error) { + switch net { + case "unix": + return newUnixListener(laddr) + case "tcp", "": // an empty net means tcp + return newTCPListener(laddr) + default: + return nil, fmt.Errorf("unknown address type %s", net) + } +} + +func newUnixListener(laddr string) (net.Listener, error) { + fi, err := os.Stat(laddr) + if err == nil { + // the file exists. + // try to remove it if it's a socket + if !isSocket(fi.Mode()) { + return nil, fmt.Errorf("file %s exists and is not a socket", laddr) + } + + if err := os.Remove(laddr); err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + // we can't do stat on the file. + // it means we can not remove it + return nil, err + } + + return net.Listen("unix", laddr) +} + +func isSocket(m os.FileMode) bool { + return m&os.ModeSocket != 0 +} + +func newTCPListener(laddr string) (net.Listener, error) { + ln, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil +} From 2db0327dc1cc11feb3b4135f723e9b6cda704c80 Mon Sep 17 00:00:00 2001 From: Richard Date: Tue, 12 May 2015 17:49:18 -0700 Subject: [PATCH 104/501] Set cache headers for layers. - Set an Etag header - Check If-None-Match and respond appropriately - Set a Cache-Control header with a default of 1 week Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 37 +++++++++++++++++++++++++++++++++++++ docs/storage/layerreader.go | 25 +++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 1e31477f..6dc7a422 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -263,6 +263,43 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) + // Cache headers + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{layerDigest.String()}, + "ETag": []string{layerDigest.String()}, + "Cache-Control": []string{"max-age=86400"}, + }) + + // Matching etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Non-matching etag, gives 200 + req, err = http.NewRequest("GET", layerURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", "") + resp, err = http.DefaultClient.Do(req) + checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) + // Missing tests: // - Upload the same tarsum file under and different repository and // ensure the content remains uncorrupted. 
diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go index ddca9741..044dab09 100644 --- a/docs/storage/layerreader.go +++ b/docs/storage/layerreader.go @@ -1,6 +1,7 @@ package storage import ( + "fmt" "net/http" "time" @@ -73,7 +74,31 @@ func (lr *layerReader) Handler(r *http.Request) (h http.Handler, err error) { } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // If the registry is serving this content itself, check + // the If-None-Match header and return 304 on match. Redirected + // storage implementations do the same. + + if etagMatch(r, lr.digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + setCacheHeaders(w, 86400, lr.digest.String()) w.Header().Set("Docker-Content-Digest", lr.digest.String()) handlerFunc.ServeHTTP(w, r) }), nil } + +func etagMatch(r *http.Request, etag string) bool { + for _, headerVal := range r.Header["If-None-Match"] { + if headerVal == etag { + return true + } + } + return false +} + +func setCacheHeaders(w http.ResponseWriter, cacheAge int, etag string) { + w.Header().Set("ETag", etag) + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d", cacheAge)) + +} From 08401cfdd6586d23d76d6be89449872c33bb1ff7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 12 May 2015 00:10:29 -0700 Subject: [PATCH 105/501] Refactor Blob Service API This PR refactors the blob service API to be oriented around blob descriptors. Identified by digests, blobs become an abstract entity that can be read and written using a descriptor as a handle. This allows blobs to take many forms, such as a ReadSeekCloser or a simple byte buffer, allowing blob oriented operations to better integrate with blob agnostic APIs (such as the `io` package). The error definitions are now better organized to reflect conditions that can only be seen when interacting with the blob API. The main benefit of this is to separate the much smaller metadata from large file storage. Many benefits also follow from this. Reading and writing has been separated into discrete services. Backend implementation is also simplified, by reducing the amount of metadata that needs to be picked up to simply serve a read. This also improves cacheability. "Opening" a blob simply consists of an access check (Stat) and a path calculation. Caching is greatly simplified and we've made the mapping of provisional to canonical hashes a first-class concept. BlobDescriptorService and BlobProvider can be combined in different ways to achieve varying effects. Recommend Review Approach ------------------------- This is a very large patch. While apologies are in order, we are getting a considerable amount of refactoring. Most changes follow from the changes to the root package (distribution), so start there. From there, the main changes are in storage. Looking at (*repository).Blobs will help to understand the how the linkedBlobStore is wired. One can explore the internals within and also branch out into understanding the changes to the caching layer. Following the descriptions below will also help to guide you. To reduce the chances for regressions, it was critical that major changes to unit tests were avoided. Where possible, they are left untouched and where not, the spirit is hopefully captured. Pay particular attention to where behavior may have changed. Storage ------- The primary changes to the `storage` package, other than the interface updates, were to merge the layerstore and blobstore. Blob access is now layered even further. 
The first layer, blobStore, exposes a global `BlobStatter` and `BlobProvider`. Operations here provide a fast path for most read operations that don't take access control into account. The `linkedBlobStore` layers on top of the `blobStore`, providing repository- scoped blob link management in the backend. The `linkedBlobStore` implements the full `BlobStore` suite, providing access-controlled, repository-local blob writers. The abstraction between the two is slightly broken in that `linkedBlobStore` is the only channel under which one can write into the global blob store. The `linkedBlobStore` also provides flexibility in that it can act over different link sets depending on configuration. This allows us to use the same code for signature links, manifest links and blob links. Eventually, we will fully consolidate this storage. The improved cache flow comes from the `linkedBlobStatter` component of `linkedBlobStore`. Using a `cachedBlobStatter`, these combine together to provide a simple cache hierarchy that should streamline access checks on read and write operations, or at least provide a single path to optimize. The metrics have been changed in a slightly incompatible way since the former operations, Fetch and Exists, are no longer relevant. The fileWriter and fileReader have been slightly modified to support the rest of the changes. The most interesting is the removal of the `Stat` call from `newFileReader`. This was the source of unnecessary round trips that were only present to look up the size of the resulting reader. Now, one must simply pass in the size, requiring the caller to decide whether or not the `Stat` call is appropriate. In several cases, it turned out the caller already had the size already. The `WriterAt` implementation has been removed from `fileWriter`, since it is no longer required for `BlobWriter`, reducing the number of paths which writes may take. Cache ----- Unfortunately, the `cache` package required a near full rewrite. It was pretty mechanical in that the cache is oriented around the `BlobDescriptorService` slightly modified to include the ability to set the values for individual digests. While the implementation is oriented towards caching, it can act as a primary store. Provisions are in place to have repository local metadata, in addition to global metadata. Fallback is implemented as a part of the storage package to maintain this flexibility. One unfortunate side-effect is that caching is now repository-scoped, rather than global. This should have little effect on performance but may increase memory usage. Handlers -------- The `handlers` package has been updated to leverage the new API. For the most part, the changes are superficial or mechanical based on the API changes. This did expose a bug in the handling of provisional vs canonical digests that was fixed in the unit tests. Configuration ------------- One user-facing change has been made to the configuration and is updated in the associated documentation. The `layerinfo` cache parameter has been deprecated by the `blobdescriptor` cache parameter. Both are equivalent and configuration files should be backward compatible. Notifications ------------- Changes the `notification` package are simply to support the interface changes. Context ------- A small change has been made to the tracing log-level. Traces have been moved from "info" to "debug" level to reduce output when not needed. 
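To sketch the shape of the statter fallback described above, the following
self-contained Go example shows a cached statter that consults a fast
descriptor cache first, falls back to the backing store, and backfills the
cache on a miss, mirroring the role cachedBlobStatter plays. The types here
are illustrative stand-ins, not distribution's actual interfaces.

	package main

	import (
		"errors"
		"fmt"
	)

	// Descriptor is the small piece of metadata that identifies a blob.
	type Descriptor struct {
		Digest string
		Size   int64
	}

	// Statter resolves a digest to a descriptor or reports the blob unknown.
	type Statter interface {
		Stat(digest string) (Descriptor, error)
	}

	var errBlobUnknown = errors.New("blob unknown")

	type mapStatter map[string]Descriptor

	func (m mapStatter) Stat(digest string) (Descriptor, error) {
		if d, ok := m[digest]; ok {
			return d, nil
		}
		return Descriptor{}, errBlobUnknown
	}

	// cachedStatter checks the cache before the (slower) backend and
	// backfills the cache on a miss.
	type cachedStatter struct {
		cache   mapStatter
		backend Statter
	}

	func (c cachedStatter) Stat(digest string) (Descriptor, error) {
		if d, err := c.cache.Stat(digest); err == nil {
			return d, nil // fast path: no backend round trip
		}
		d, err := c.backend.Stat(digest)
		if err != nil {
			return Descriptor{}, err
		}
		c.cache[digest] = d // backfill for subsequent reads
		return d, nil
	}

	func main() {
		backend := mapStatter{"sha256:abc": {Digest: "sha256:abc", Size: 1024}}
		cs := cachedStatter{cache: mapStatter{}, backend: backend}

		d, err := cs.Stat("sha256:abc") // miss: served by backend, then cached
		fmt.Println(d, err)
		d, err = cs.Stat("sha256:abc") // hit: served from cache
		fmt.Println(d, err)
	}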
Signed-off-by: Stephen J Day --- docs/handlers/api_test.go | 21 +- docs/handlers/app.go | 24 +- docs/handlers/app_test.go | 2 +- docs/handlers/blob.go | 69 +++ docs/handlers/blobupload.go | 355 ++++++++++++++ docs/handlers/hmac.go | 14 +- docs/handlers/hmac_test.go | 12 +- docs/handlers/images.go | 6 +- docs/handlers/layer.go | 74 --- docs/handlers/layerupload.go | 344 ------------- docs/storage/{layer_test.go => blob_test.go} | 225 ++++----- docs/storage/blobserver.go | 72 +++ docs/storage/blobstore.go | 231 +++++---- docs/storage/blobwriter.go | 469 ++++++++++++++++++ docs/storage/blobwriter_nonresumable.go | 6 + docs/storage/blobwriter_resumable.go | 9 + docs/storage/cache/cache.go | 106 +--- docs/storage/cache/cache_test.go | 179 ++++--- docs/storage/cache/memory.go | 174 +++++-- docs/storage/cache/memory_test.go | 6 +- docs/storage/cache/redis.go | 238 ++++++--- docs/storage/cache/redis_test.go | 4 +- docs/storage/cachedblobdescriptorstore.go | 84 ++++ docs/storage/filereader.go | 53 +- docs/storage/filereader_test.go | 6 +- docs/storage/filewriter.go | 60 +-- docs/storage/filewriter_test.go | 24 +- docs/storage/layercache.go | 202 -------- docs/storage/layerreader.go | 104 ---- docs/storage/layerstore.go | 178 ------- docs/storage/layerwriter.go | 478 ------------------- docs/storage/layerwriter_nonresumable.go | 6 - docs/storage/layerwriter_resumable.go | 9 - docs/storage/linkedblobstore.go | 258 ++++++++++ docs/storage/manifeststore.go | 62 +-- docs/storage/manifeststore_test.go | 28 +- docs/storage/paths.go | 44 +- docs/storage/paths_test.go | 4 +- docs/storage/purgeuploads_test.go | 6 +- docs/storage/registry.go | 121 +++-- docs/storage/revisionstore.go | 118 ++--- docs/storage/signaturestore.go | 80 ++-- docs/storage/tagstore.go | 110 ++--- docs/storage/util.go | 21 + 44 files changed, 2426 insertions(+), 2270 deletions(-) create mode 100644 docs/handlers/blob.go create mode 100644 docs/handlers/blobupload.go delete mode 100644 docs/handlers/layer.go delete mode 100644 docs/handlers/layerupload.go rename docs/storage/{layer_test.go => blob_test.go} (56%) create mode 100644 docs/storage/blobserver.go create mode 100644 docs/storage/blobwriter.go create mode 100644 docs/storage/blobwriter_nonresumable.go create mode 100644 docs/storage/blobwriter_resumable.go create mode 100644 docs/storage/cachedblobdescriptorstore.go delete mode 100644 docs/storage/layercache.go delete mode 100644 docs/storage/layerreader.go delete mode 100644 docs/storage/layerstore.go delete mode 100644 docs/storage/layerwriter.go delete mode 100644 docs/storage/layerwriter_nonresumable.go delete mode 100644 docs/storage/layerwriter_resumable.go create mode 100644 docs/storage/linkedblobstore.go create mode 100644 docs/storage/util.go diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 6dc7a422..9b5027ba 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -93,8 +93,8 @@ func TestURLPrefix(t *testing.T) { } -// TestLayerAPI conducts a full test of the of the layer api. -func TestLayerAPI(t *testing.T) { +// TestBlobAPI conducts a full test of the of the blob api. +func TestBlobAPI(t *testing.T) { // TODO(stevvooe): This test code is complete junk but it should cover the // complete flow. This must be broken down and checked against the // specification *before* we submit the final to docker core. 
@@ -213,6 +213,13 @@ func TestLayerAPI(t *testing.T) { // Now, push just a chunk layerFile.Seek(0, 0) + canonicalDigester := digest.NewCanonicalDigester() + if _, err := io.Copy(canonicalDigester, layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + layerFile.Seek(0, 0) uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) @@ -226,7 +233,7 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "checking head on existing layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, }) // ---------------- @@ -239,7 +246,7 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "fetching layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, }) // Verify the body @@ -272,9 +279,9 @@ func TestLayerAPI(t *testing.T) { checkResponse(t, "fetching layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, - "Docker-Content-Digest": []string{layerDigest.String()}, - "ETag": []string{layerDigest.String()}, - "Cache-Control": []string{"max-age=86400"}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + "ETag": []string{canonicalDigest.String()}, + "Cache-Control": []string{"max-age=31536000"}, }) // Matching etag, gives 304 diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 40181afa..22c0b6de 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -67,9 +67,9 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App }) app.register(v2.RouteNameManifest, imageManifestDispatcher) app.register(v2.RouteNameTags, tagsDispatcher) - app.register(v2.RouteNameBlob, layerDispatcher) - app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) - app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) + app.register(v2.RouteNameBlob, blobDispatcher) + app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) var err error app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) @@ -103,18 +103,24 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // configure storage caches if cc, ok := configuration.Storage["cache"]; ok { - switch cc["layerinfo"] { + v, ok := cc["blobdescriptor"] + if !ok { + // Backwards compatible: "layerinfo" == "blobdescriptor" + v = cc["layerinfo"] + } + + switch v { case "redis": if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewRedisLayerInfoCache(app.redis)) - ctxu.GetLogger(app).Infof("using redis layerinfo cache") + app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewRedisBlobDescriptorCacheProvider(app.redis)) + ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewInMemoryLayerInfoCache()) - 
ctxu.GetLogger(app).Infof("using inmemory layerinfo cache")
+			app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewInMemoryBlobDescriptorCacheProvider())
+			ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache")
 		default:
-			if cc["layerinfo"] != "" {
+			if v != "" {
 				ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"])
 			}
 		}
 
diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go
index 8ea5b1e5..03ea0c9c 100644
--- a/docs/handlers/app_test.go
+++ b/docs/handlers/app_test.go
@@ -30,7 +30,7 @@ func TestAppDispatcher(t *testing.T) {
 		Context:  ctx,
 		router:   v2.Router(),
 		driver:   driver,
-		registry: storage.NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()),
+		registry: storage.NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()),
 	}
 	server := httptest.NewServer(app)
 	router := v2.Router()
diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go
new file mode 100644
index 00000000..3237b195
--- /dev/null
+++ b/docs/handlers/blob.go
@@ -0,0 +1,69 @@
+package handlers
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/gorilla/handlers"
+)
+
+// blobDispatcher uses the request context to build a blobHandler.
+func blobDispatcher(ctx *Context, r *http.Request) http.Handler {
+	dgst, err := getDigest(ctx)
+	if err != nil {
+
+		if err == errDigestNotAvailable {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusNotFound)
+				ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err)
+			})
+		}
+
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err)
+		})
+	}
+
+	blobHandler := &blobHandler{
+		Context: ctx,
+		Digest:  dgst,
+	}
+
+	return handlers.MethodHandler{
+		"GET":  http.HandlerFunc(blobHandler.GetBlob),
+		"HEAD": http.HandlerFunc(blobHandler.GetBlob),
+	}
+}
+
+// blobHandler serves http blob requests.
+type blobHandler struct {
+	*Context
+
+	Digest digest.Digest
+}
+
+// GetBlob fetches the binary data from backend storage and returns it in the
+// response.
+func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) {
+	context.GetLogger(bh).Debug("GetBlob")
+	blobs := bh.Repository.Blobs(bh)
+	desc, err := blobs.Stat(bh, bh.Digest)
+	if err != nil {
+		if err == distribution.ErrBlobUnknown {
+			w.WriteHeader(http.StatusNotFound)
+			bh.Errors.Push(v2.ErrorCodeBlobUnknown, bh.Digest)
+		} else {
+			bh.Errors.Push(v2.ErrorCodeUnknown, err)
+		}
+		return
+	}
+
+	if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil {
+		context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err)
+		bh.Errors.Push(v2.ErrorCodeUnknown, err)
+		return
+	}
+}
diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go
new file mode 100644
index 00000000..99a75698
--- /dev/null
+++ b/docs/handlers/blobupload.go
@@ -0,0 +1,355 @@
+package handlers
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+
+	"github.com/docker/distribution"
+	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/gorilla/handlers"
+)
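An aside on the dispatch style used by blob.go above and by the upload dispatcher that follows: gorilla's handlers.MethodHandler maps HTTP methods to handlers and answers anything unlisted with 405 Method Not Allowed plus an Allow header. A self-contained sketch (the handler body is invented for illustration):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	get := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "blob bytes would be served here")
	})

	// MethodHandler is a map from method name to handler; unlisted methods
	// get a 405 response automatically.
	h := handlers.MethodHandler{
		"GET":  get,
		"HEAD": get, // HEAD reuses the GET handler, as blobDispatcher does
	}

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", h))
}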
+
+// blobUploadDispatcher constructs and returns the blob upload handler for the
+// given request context.
+func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
+	buh := &blobUploadHandler{
+		Context: ctx,
+		UUID:    getUploadUUID(ctx),
+	}
+
+	handler := http.Handler(handlers.MethodHandler{
+		"POST":   http.HandlerFunc(buh.StartBlobUpload),
+		"GET":    http.HandlerFunc(buh.GetUploadStatus),
+		"HEAD":   http.HandlerFunc(buh.GetUploadStatus),
+		"PATCH":  http.HandlerFunc(buh.PatchBlobData),
+		"PUT":    http.HandlerFunc(buh.PutBlobUploadComplete),
+		"DELETE": http.HandlerFunc(buh.CancelBlobUpload),
+	})
+
+	if buh.UUID != "" {
+		state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
+		if err != nil {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err)
+				w.WriteHeader(http.StatusBadRequest)
+				buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
+			})
+		}
+		buh.State = state
+
+		if state.Name != ctx.Repository.Name() {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name())
+				w.WriteHeader(http.StatusBadRequest)
+				buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
+			})
+		}
+
+		if state.UUID != buh.UUID {
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
+				w.WriteHeader(http.StatusBadRequest)
+				buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
+			})
+		}
+
+		blobs := ctx.Repository.Blobs(buh)
+		upload, err := blobs.Resume(buh, buh.UUID)
+		if err != nil {
+			ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err)
+			if err == distribution.ErrBlobUploadUnknown {
+				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					w.WriteHeader(http.StatusNotFound)
+					buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err)
+				})
+			}
+
+			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusInternalServerError)
+				buh.Errors.Push(v2.ErrorCodeUnknown, err)
+			})
+		}
+		buh.Upload = upload
+
+		if state.Offset > 0 {
+			// Seek the blob upload to the correct spot if it's non-zero.
+			// These error conditions should be rare and demonstrate real
+			// problems. We basically cancel the upload and tell the client to
+			// start over.
+			if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil {
+				defer upload.Close()
+				ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err)
+				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					w.WriteHeader(http.StatusBadRequest)
+					buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
+					upload.Cancel(buh)
+				})
+			} else if nn != buh.State.Offset {
+				defer upload.Close()
+				ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset)
+				return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					w.WriteHeader(http.StatusBadRequest)
+					buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err)
+					upload.Cancel(buh)
+				})
+			}
+		}
+
+		handler = closeResources(handler, buh.Upload)
+	}
+
+	return handler
+}
+
+// blobUploadHandler handles the http blob upload process.
+type blobUploadHandler struct {
+	*Context
+
+	// UUID identifies the upload instance for the current request. Blob
+	// writers are keyed by UUID in this implementation.
+ UUID string + + Upload distribution.BlobWriter + + State blobUploadState +} + +// StartBlobUpload begins the blob upload process and allocates a server-side +// blob writer session. +func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + blobs := buh.Repository.Blobs(buh) + upload, err := blobs.Create(buh) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + buh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + buh.Upload = upload + defer buh.Upload.Close() + + if err := buh.blobUploadResponse(w, r, true); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + buh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) + w.WriteHeader(http.StatusAccepted) +} + +// GetUploadStatus returns the status of a given upload, identified by id. +func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return + } + + // TODO(dmcgowan): Set last argument to false in blobUploadResponse when + // resumable upload is supported. This will enable returning a non-zero + // range for clients to begin uploading at an offset. + if err := buh.blobUploadResponse(w, r, true); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + buh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + w.WriteHeader(http.StatusNoContent) +} + +// PatchBlobData writes data to an upload. +func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return + } + + ct := r.Header.Get("Content-Type") + if ct != "" && ct != "application/octet-stream" { + w.WriteHeader(http.StatusBadRequest) + // TODO(dmcgowan): encode error + return + } + + // TODO(dmcgowan): support Content-Range header to seek and write range + + // Copy the data + if _, err := io.Copy(buh.Upload, r.Body); err != nil { + ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + buh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + if err := buh.blobUploadResponse(w, r, false); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + buh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// PutBlobUploadComplete takes the final request of a blob upload. The +// request may include all the blob data or no blob data. Any data +// provided is received and verified. If successful, the blob is linked +// into the blob store and 201 Created is returned with the canonical +// url of the blob. +func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return + } + + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + + if dgstStr == "" { + // no digest? return error, but allow retry. + w.WriteHeader(http.StatusBadRequest) + buh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") + return + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + // no digest? return error, but allow retry. 
+ w.WriteHeader(http.StatusNotFound) + buh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed") + return + } + + // Read in the data, if any. + if _, err := io.Copy(buh.Upload, r.Body); err != nil { + ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + buh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } + + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ + Digest: dgst, + + // TODO(stevvooe): This isn't wildly important yet, but we should + // really set the length and mediatype. For now, we can let the + // backend take care of this. + }) + + if err != nil { + switch err := err.(type) { + case distribution.ErrBlobInvalidDigest: + w.WriteHeader(http.StatusBadRequest) + buh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + default: + switch err { + case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: + w.WriteHeader(http.StatusBadRequest) + buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + default: + ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) + w.WriteHeader(http.StatusInternalServerError) + buh.Errors.Push(v2.ErrorCodeUnknown, err) + } + + } + + // Clean up the backend blob data if there was an error. + if err := buh.Upload.Cancel(buh); err != nil { + // If the cleanup fails, all we can do is observe and report. + ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) + } + + return + } + + // Build our canonical blob url + blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) + if err != nil { + buh.Errors.Push(v2.ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) +} + +// CancelBlobUpload cancels an in-progress upload of a blob. +func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { + if buh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + return + } + + w.Header().Set("Docker-Upload-UUID", buh.UUID) + if err := buh.Upload.Cancel(buh); err != nil { + ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + buh.Errors.PushErr(err) + } + + w.WriteHeader(http.StatusNoContent) +} + +// blobUploadResponse provides a standard request for uploading blobs and +// chunk responses. This sets the correct headers but the response status is +// left to the caller. The fresh argument is used to ensure that new blob +// uploads always start at a 0 offset. This allows disabling resumable push by +// always returning a 0 offset on check status. +func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { + + var offset int64 + if !fresh { + var err error + offset, err = buh.Upload.Seek(0, os.SEEK_CUR) + if err != nil { + ctxu.GetLogger(buh).Errorf("unable get current offset of blob upload: %v", err) + return err + } + } + + // TODO(stevvooe): Need a better way to manage the upload state automatically. 
+	buh.State.Name = buh.Repository.Name()
+	buh.State.UUID = buh.Upload.ID()
+	buh.State.Offset = offset
+	buh.State.StartedAt = buh.Upload.StartedAt()
+
+	token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State)
+	if err != nil {
+		ctxu.GetLogger(buh).Infof("error building upload state token: %s", err)
+		return err
+	}
+
+	uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL(
+		buh.Repository.Name(), buh.Upload.ID(),
+		url.Values{
+			"_state": []string{token},
+		})
+	if err != nil {
+		ctxu.GetLogger(buh).Infof("error building upload url: %s", err)
+		return err
+	}
+
+	endRange := offset
+	if endRange > 0 {
+		endRange = endRange - 1
+	}
+
+	w.Header().Set("Docker-Upload-UUID", buh.UUID)
+	w.Header().Set("Location", uploadURL)
+	w.Header().Set("Content-Length", "0")
+	w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))
+
+	return nil
+}
diff --git a/docs/handlers/hmac.go b/docs/handlers/hmac.go
index e17ececa..1725d240 100644
--- a/docs/handlers/hmac.go
+++ b/docs/handlers/hmac.go
@@ -9,9 +9,9 @@ import (
 	"time"
 )
 
-// layerUploadState captures the state serializable state of the layer upload.
-type layerUploadState struct {
-	// name is the primary repository under which the layer will be linked.
+// blobUploadState captures the serializable state of the blob upload.
+type blobUploadState struct {
+	// name is the primary repository under which the blob will be linked.
 	Name string
 
 	// UUID identifies the upload.
@@ -26,10 +26,10 @@ type layerUploadState struct {
 
 type hmacKey string
 
-// unpackUploadState unpacks and validates the layer upload state from the
+// unpackUploadState unpacks and validates the blob upload state from the
 // token, using the hmacKey secret.
-func (secret hmacKey) unpackUploadState(token string) (layerUploadState, error) {
-	var state layerUploadState
+func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) {
+	var state blobUploadState
 
 	tokenBytes, err := base64.URLEncoding.DecodeString(token)
 	if err != nil {
@@ -59,7 +59,7 @@ func (secret hmacKey) unpackUploadState(token string) (layerUploadState, error)
 // packUploadState packs the upload state signed with an HMAC digest using
 // the hmacKey secret, encoding to url safe base64. The resulting token can be
 // used to share data with minimized risk of external tampering.
-func (secret hmacKey) packUploadState(lus layerUploadState) (string, error) { +func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { mac := hmac.New(sha256.New, []byte(secret)) p, err := json.Marshal(lus) if err != nil { diff --git a/docs/handlers/hmac_test.go b/docs/handlers/hmac_test.go index cce2cd49..366c7279 100644 --- a/docs/handlers/hmac_test.go +++ b/docs/handlers/hmac_test.go @@ -2,7 +2,7 @@ package handlers import "testing" -var layerUploadStates = []layerUploadState{ +var blobUploadStates = []blobUploadState{ { Name: "hello", UUID: "abcd-1234-qwer-0987", @@ -45,7 +45,7 @@ var secrets = []string{ func TestLayerUploadTokens(t *testing.T) { secret := hmacKey("supersecret") - for _, testcase := range layerUploadStates { + for _, testcase := range blobUploadStates { token, err := secret.packUploadState(testcase) if err != nil { t.Fatal(err) @@ -56,7 +56,7 @@ func TestLayerUploadTokens(t *testing.T) { t.Fatal(err) } - assertLayerUploadStateEquals(t, testcase, lus) + assertBlobUploadStateEquals(t, testcase, lus) } } @@ -68,7 +68,7 @@ func TestHMACValidation(t *testing.T) { secret2 := hmacKey(secret) badSecret := hmacKey("DifferentSecret") - for _, testcase := range layerUploadStates { + for _, testcase := range blobUploadStates { token, err := secret1.packUploadState(testcase) if err != nil { t.Fatal(err) @@ -79,7 +79,7 @@ func TestHMACValidation(t *testing.T) { t.Fatal(err) } - assertLayerUploadStateEquals(t, testcase, lus) + assertBlobUploadStateEquals(t, testcase, lus) _, err = badSecret.unpackUploadState(token) if err == nil { @@ -104,7 +104,7 @@ func TestHMACValidation(t *testing.T) { } } -func assertLayerUploadStateEquals(t *testing.T, expected layerUploadState, received layerUploadState) { +func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { if expected.Name != received.Name { t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 174bd3d9..45029da5 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -136,14 +136,12 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http case distribution.ErrManifestVerification: for _, verificationError := range err { switch verificationError := verificationError.(type) { - case distribution.ErrUnknownLayer: - imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer) + case distribution.ErrManifestBlobUnknown: + imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.Digest) case distribution.ErrManifestUnverified: imh.Errors.Push(v2.ErrorCodeManifestUnverified) default: if verificationError == digest.ErrDigestInvalidFormat { - // TODO(stevvooe): We need to really need to move all - // errors to types. Its much more straightforward. imh.Errors.Push(v2.ErrorCodeDigestInvalid) } else { imh.Errors.PushErr(verificationError) diff --git a/docs/handlers/layer.go b/docs/handlers/layer.go deleted file mode 100644 index 13ee8560..00000000 --- a/docs/handlers/layer.go +++ /dev/null @@ -1,74 +0,0 @@ -package handlers - -import ( - "net/http" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// layerDispatcher uses the request context to build a layerHandler. 
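For reference, the token scheme exercised by the tests above can be sketched standalone. The exact byte layout below (HMAC prepended to the JSON payload before base64url encoding) is an assumption for illustration; the hunks only show the MAC construction and the state struct:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
)

// state stands in for blobUploadState; only illustrative fields are shown.
type state struct {
	Name, UUID string
	Offset     int64
}

// pack signs the JSON-encoded state and emits a url-safe token.
func pack(secret []byte, s state) (string, error) {
	p, err := json.Marshal(s)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(p)
	return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}

// unpack verifies the token's signature before trusting its contents.
func unpack(secret []byte, token string) (state, error) {
	var s state
	raw, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return s, err
	}
	if len(raw) < sha256.Size {
		return s, errors.New("token too short")
	}
	mac := hmac.New(sha256.New, secret)
	mac.Write(raw[sha256.Size:])
	if !hmac.Equal(mac.Sum(nil), raw[:sha256.Size]) {
		return s, errors.New("invalid token signature")
	}
	return s, json.Unmarshal(raw[sha256.Size:], &s)
}

Because the state is signed but not encrypted, this protects against tampering rather than disclosure, which matches the packUploadState comment above.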
-func layerDispatcher(ctx *Context, r *http.Request) http.Handler { - dgst, err := getDigest(ctx) - if err != nil { - - if err == errDigestNotAvailable { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) - }) - } - - layerHandler := &layerHandler{ - Context: ctx, - Digest: dgst, - } - - return handlers.MethodHandler{ - "GET": http.HandlerFunc(layerHandler.GetLayer), - "HEAD": http.HandlerFunc(layerHandler.GetLayer), - } -} - -// layerHandler serves http layer requests. -type layerHandler struct { - *Context - - Digest digest.Digest -} - -// GetLayer fetches the binary data from backend storage returns it in the -// response. -func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { - context.GetLogger(lh).Debug("GetImageLayer") - layers := lh.Repository.Layers() - layer, err := layers.Fetch(lh.Digest) - - if err != nil { - switch err := err.(type) { - case distribution.ErrUnknownLayer: - w.WriteHeader(http.StatusNotFound) - lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer) - default: - lh.Errors.Push(v2.ErrorCodeUnknown, err) - } - return - } - - handler, err := layer.Handler(r) - if err != nil { - context.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err) - lh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - handler.ServeHTTP(w, r) -} diff --git a/docs/handlers/layerupload.go b/docs/handlers/layerupload.go deleted file mode 100644 index 1591d98d..00000000 --- a/docs/handlers/layerupload.go +++ /dev/null @@ -1,344 +0,0 @@ -package handlers - -import ( - "fmt" - "io" - "net/http" - "net/url" - "os" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" - "github.com/gorilla/handlers" -) - -// layerUploadDispatcher constructs and returns the layer upload handler for -// the given request context. 
-func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { - luh := &layerUploadHandler{ - Context: ctx, - UUID: getUploadUUID(ctx), - } - - handler := http.Handler(handlers.MethodHandler{ - "POST": http.HandlerFunc(luh.StartLayerUpload), - "GET": http.HandlerFunc(luh.GetUploadStatus), - "HEAD": http.HandlerFunc(luh.GetUploadStatus), - "PATCH": http.HandlerFunc(luh.PatchLayerData), - "PUT": http.HandlerFunc(luh.PutLayerUploadComplete), - "DELETE": http.HandlerFunc(luh.CancelLayerUpload), - }) - - if luh.UUID != "" { - state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) - if err != nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - }) - } - luh.State = state - - if state.Name != ctx.Repository.Name() { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, luh.Repository.Name()) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - }) - } - - if state.UUID != luh.UUID { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, luh.UUID) - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - }) - } - - layers := ctx.Repository.Layers() - upload, err := layers.Resume(luh.UUID) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) - if err == distribution.ErrLayerUploadUnknown { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - }) - } - luh.Upload = upload - - if state.Offset > 0 { - // Seek the layer upload to the correct spot if it's non-zero. - // These error conditions should be rare and demonstrate really - // problems. We basically cancel the upload and tell the client to - // start over. - if nn, err := upload.Seek(luh.State.Offset, os.SEEK_SET); err != nil { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("error seeking layer upload: %v", err) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - upload.Cancel() - }) - } else if nn != luh.State.Offset { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("seek to wrong offest: %d != %d", nn, luh.State.Offset) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) - upload.Cancel() - }) - } - } - - handler = closeResources(handler, luh.Upload) - } - - return handler -} - -// layerUploadHandler handles the http layer upload process. -type layerUploadHandler struct { - *Context - - // UUID identifies the upload instance for the current request. - UUID string - - Upload distribution.LayerUpload - - State layerUploadState -} - -// StartLayerUpload begins the layer upload process and allocates a server- -// side upload session. 
-func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) { - layers := luh.Repository.Layers() - upload, err := layers.Upload() - if err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - luh.Upload = upload - defer luh.Upload.Close() - - if err := luh.layerUploadResponse(w, r, true); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - w.Header().Set("Docker-Upload-UUID", luh.Upload.UUID()) - w.WriteHeader(http.StatusAccepted) -} - -// GetUploadStatus returns the status of a given upload, identified by uuid. -func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - // TODO(dmcgowan): Set last argument to false in layerUploadResponse when - // resumable upload is supported. This will enable returning a non-zero - // range for clients to begin uploading at an offset. - if err := luh.layerUploadResponse(w, r, true); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - w.Header().Set("Docker-Upload-UUID", luh.UUID) - w.WriteHeader(http.StatusNoContent) -} - -// PatchLayerData writes data to an upload. -func (luh *layerUploadHandler) PatchLayerData(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - ct := r.Header.Get("Content-Type") - if ct != "" && ct != "application/octet-stream" { - w.WriteHeader(http.StatusBadRequest) - // TODO(dmcgowan): encode error - return - } - - // TODO(dmcgowan): support Content-Range header to seek and write range - - // Copy the data - if _, err := io.Copy(luh.Upload, r.Body); err != nil { - ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - if err := luh.layerUploadResponse(w, r, false); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - w.WriteHeader(http.StatusAccepted) -} - -// PutLayerUploadComplete takes the final request of a layer upload. The -// request may include all the layer data or no layer data. Any data -// provided is received and verified. If successful, the layer is linked -// into the blob store and 201 Created is returned with the canonical -// url of the layer. -func (luh *layerUploadHandler) PutLayerUploadComplete(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - - if dgstStr == "" { - // no digest? return error, but allow retry. - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") - return - } - - dgst, err := digest.ParseDigest(dgstStr) - if err != nil { - // no digest? return error, but allow retry. 
- w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed") - return - } - - // TODO(stevvooe): Consider checking the error on this copy. - // Theoretically, problems should be detected during verification but we - // may miss a root cause. - - // Read in the data, if any. - if _, err := io.Copy(luh.Upload, r.Body); err != nil { - ctxu.GetLogger(luh).Errorf("unknown error copying into upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return - } - - layer, err := luh.Upload.Finish(dgst) - if err != nil { - switch err := err.(type) { - case distribution.ErrLayerInvalidDigest: - w.WriteHeader(http.StatusBadRequest) - luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - default: - ctxu.GetLogger(luh).Errorf("unknown error completing upload: %#v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - } - - // Clean up the backend layer data if there was an error. - if err := luh.Upload.Cancel(); err != nil { - // If the cleanup fails, all we can do is observe and report. - ctxu.GetLogger(luh).Errorf("error canceling upload after error: %v", err) - } - - return - } - - // Build our canonical layer url - layerURL, err := luh.urlBuilder.BuildBlobURL(luh.Repository.Name(), layer.Digest()) - if err != nil { - luh.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - w.Header().Set("Location", layerURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", layer.Digest().String()) - w.WriteHeader(http.StatusCreated) -} - -// CancelLayerUpload cancels an in-progress upload of a layer. -func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) { - if luh.Upload == nil { - w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) - return - } - - w.Header().Set("Docker-Upload-UUID", luh.UUID) - if err := luh.Upload.Cancel(); err != nil { - ctxu.GetLogger(luh).Errorf("error encountered canceling upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.PushErr(err) - } - - w.WriteHeader(http.StatusNoContent) -} - -// layerUploadResponse provides a standard request for uploading layers and -// chunk responses. This sets the correct headers but the response status is -// left to the caller. The fresh argument is used to ensure that new layer -// uploads always start at a 0 offset. This allows disabling resumable push -// by always returning a 0 offset on check status. -func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - - var offset int64 - if !fresh { - var err error - offset, err = luh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(luh).Errorf("unable get current offset of layer upload: %v", err) - return err - } - } - - // TODO(stevvooe): Need a better way to manage the upload state automatically. 
- luh.State.Name = luh.Repository.Name() - luh.State.UUID = luh.Upload.UUID() - luh.State.Offset = offset - luh.State.StartedAt = luh.Upload.StartedAt() - - token, err := hmacKey(luh.Config.HTTP.Secret).packUploadState(luh.State) - if err != nil { - ctxu.GetLogger(luh).Infof("error building upload state token: %s", err) - return err - } - - uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL( - luh.Repository.Name(), luh.Upload.UUID(), - url.Values{ - "_state": []string{token}, - }) - if err != nil { - ctxu.GetLogger(luh).Infof("error building upload url: %s", err) - return err - } - - endRange := offset - if endRange > 0 { - endRange = endRange - 1 - } - - w.Header().Set("Docker-Upload-UUID", luh.UUID) - w.Header().Set("Location", uploadURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) - - return nil -} diff --git a/docs/storage/layer_test.go b/docs/storage/blob_test.go similarity index 56% rename from docs/storage/layer_test.go rename to docs/storage/blob_test.go index 2ea99813..6843922a 100644 --- a/docs/storage/layer_test.go +++ b/docs/storage/blob_test.go @@ -13,14 +13,13 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/cache" - storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" ) -// TestSimpleLayerUpload covers the layer upload process, exercising common +// TestSimpleBlobUpload covers the blob upload process, exercising common // error paths that might be seen during an upload. -func TestSimpleLayerUpload(t *testing.T) { +func TestSimpleBlobUpload(t *testing.T) { randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { @@ -36,35 +35,35 @@ func TestSimpleLayerUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()) + registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - ls := repository.Layers() + bs := repository.Blobs(ctx) h := sha256.New() rd := io.TeeReader(randomDataReader, h) - layerUpload, err := ls.Upload() + blobUpload, err := bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting layer upload: %s", err) } // Cancel the upload then restart it - if err := layerUpload.Cancel(); err != nil { + if err := blobUpload.Cancel(ctx); err != nil { t.Fatalf("unexpected error during upload cancellation: %v", err) } // Do a resume, get unknown upload - layerUpload, err = ls.Resume(layerUpload.UUID()) - if err != distribution.ErrLayerUploadUnknown { + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + if err != distribution.ErrBlobUploadUnknown { t.Fatalf("unexpected error resuming upload, should be unkown: %v", err) } // Restart! 
- layerUpload, err = ls.Upload() + blobUpload, err = bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting layer upload: %s", err) } @@ -75,7 +74,7 @@ func TestSimpleLayerUpload(t *testing.T) { t.Fatalf("error getting seeker size of random data: %v", err) } - nn, err := io.Copy(layerUpload, rd) + nn, err := io.Copy(blobUpload, rd) if err != nil { t.Fatalf("unexpected error uploading layer data: %v", err) } @@ -84,46 +83,51 @@ func TestSimpleLayerUpload(t *testing.T) { t.Fatalf("layer data write incomplete") } - offset, err := layerUpload.Seek(0, os.SEEK_CUR) + offset, err := blobUpload.Seek(0, os.SEEK_CUR) if err != nil { t.Fatalf("unexpected error seeking layer upload: %v", err) } if offset != nn { - t.Fatalf("layerUpload not updated with correct offset: %v != %v", offset, nn) + t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) } - layerUpload.Close() + blobUpload.Close() // Do a resume, for good fun - layerUpload, err = ls.Resume(layerUpload.UUID()) + blobUpload, err = bs.Resume(ctx, blobUpload.ID()) if err != nil { t.Fatalf("unexpected error resuming upload: %v", err) } sha256Digest := digest.NewDigest("sha256", h) - layer, err := layerUpload.Finish(dgst) - + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) if err != nil { t.Fatalf("unexpected error finishing layer upload: %v", err) } // After finishing an upload, it should no longer exist. - if _, err := ls.Resume(layerUpload.UUID()); err != distribution.ErrLayerUploadUnknown { + if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { t.Fatalf("expected layer upload to be unknown, got %v", err) } // Test for existence. - exists, err := ls.Exists(layer.Digest()) + statDesc, err := bs.Stat(ctx, desc.Digest) if err != nil { - t.Fatalf("unexpected error checking for existence: %v", err) + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } - if !exists { - t.Fatalf("layer should now exist") + if statDesc != desc { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + h.Reset() - nn, err = io.Copy(h, layer) + nn, err = io.Copy(h, rc) if err != nil { t.Fatalf("error reading layer: %v", err) } @@ -137,21 +141,21 @@ func TestSimpleLayerUpload(t *testing.T) { } } -// TestSimpleLayerRead just creates a simple layer file and ensures that basic +// TestSimpleBlobRead just creates a simple blob file and ensures that basic // open, read, seek, read works. More specific edge cases should be covered in // other tests. -func TestSimpleLayerRead(t *testing.T) { +func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()) + registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - ls := repository.Layers() + bs := repository.Blobs(ctx) - randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() + randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. 
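The rewritten upload test above reduces to a Create/Copy/Commit flow on the blob service, replacing the old layers.Upload()/Finish() pair. A condensed, illustrative helper under the interfaces this diff introduces (BlobIngester per the addBlob helper later in this file; error handling trimmed):

package example

import (
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// uploadBlob is a sketch only: stream content through a blob writer, then
// Commit against the expected digest to link the blob into the store.
func uploadBlob(ctx context.Context, bs distribution.BlobIngester, dgst digest.Digest, rd io.Reader) (distribution.Descriptor, error) {
	wr, err := bs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if _, err := io.Copy(wr, rd); err != nil {
		wr.Cancel(ctx) // release the upload's backend resources on failure
		return distribution.Descriptor{}, err
	}

	// Commit verifies the written bytes against dgst before linking.
	return wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
}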
if err != nil { t.Fatalf("error creating random data: %v", err) } @@ -159,31 +163,14 @@ func TestSimpleLayerRead(t *testing.T) { dgst := digest.Digest(tarSumStr) // Test for existence. - exists, err := ls.Exists(dgst) - if err != nil { - t.Fatalf("unexpected error checking for existence: %v", err) + desc, err := bs.Stat(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when testing for existence: %v", err) } - if exists { - t.Fatalf("layer should not exist") - } - - // Try to get the layer and make sure we get a not found error - layer, err := ls.Fetch(dgst) - if err == nil { - t.Fatalf("error expected fetching unknown layer") - } - - switch err.(type) { - case distribution.ErrUnknownLayer: - err = nil - default: - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } - - randomLayerDigest, err := writeTestLayer(driver, defaultPathMapper, imageName, dgst, randomLayerReader) - if err != nil { - t.Fatalf("unexpected error writing test layer: %v", err) + rc, err := bs.Open(ctx, dgst) + if err != distribution.ErrBlobUnknown { + t.Fatalf("expected not found error when opening non-existent blob: %v", err) } randomLayerSize, err := seekerSize(randomLayerReader) @@ -191,45 +178,57 @@ func TestSimpleLayerRead(t *testing.T) { t.Fatalf("error getting seeker size for random layer: %v", err) } - layer, err = ls.Fetch(dgst) + descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Length: randomLayerSize} + t.Logf("desc: %v", descBefore) + + desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) if err != nil { - t.Fatal(err) + t.Fatalf("error adding blob to blobservice: %v", err) } - defer layer.Close() + + if desc.Length != randomLayerSize { + t.Fatalf("committed blob has incorrect length: %v != %v", desc.Length, randomLayerSize) + } + + rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. 
+ if err != nil { + t.Fatalf("error opening blob with %v: %v", dgst, err) + } + defer rc.Close() // Now check the sha digest and ensure its the same h := sha256.New() - nn, err := io.Copy(h, layer) - if err != nil && err != io.EOF { + nn, err := io.Copy(h, rc) + if err != nil { t.Fatalf("unexpected error copying to hash: %v", err) } if nn != randomLayerSize { - t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize) + t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) } sha256Digest := digest.NewDigest("sha256", h) - if sha256Digest != randomLayerDigest { - t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest) + if sha256Digest != desc.Digest { + t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) } - // Now seek back the layer, read the whole thing and check against randomLayerData - offset, err := layer.Seek(0, os.SEEK_SET) + // Now seek back the blob, read the whole thing and check against randomLayerData + offset, err := rc.Seek(0, os.SEEK_SET) if err != nil { - t.Fatalf("error seeking layer: %v", err) + t.Fatalf("error seeking blob: %v", err) } if offset != 0 { t.Fatalf("seek failed: expected 0 offset, got %d", offset) } - p, err := ioutil.ReadAll(layer) + p, err := ioutil.ReadAll(rc) if err != nil { - t.Fatalf("error reading all of layer: %v", err) + t.Fatalf("error reading all of blob: %v", err) } if len(p) != int(randomLayerSize) { - t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize) + t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) } // Reset the randomLayerReader and read back the buffer @@ -253,19 +252,26 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()) + registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - ls := repository.Layers() + bs := repository.Blobs(ctx) - upload, err := ls.Upload() + wr, err := bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting upload: %v", err) } - io.Copy(upload, bytes.NewReader([]byte{})) + nn, err := io.Copy(wr, bytes.NewReader([]byte{})) + if err != nil { + t.Fatalf("error copying into blob writer: %v", err) + } + + if nn != 0 { + t.Fatalf("unexpected number of bytes copied: %v > 0", nn) + } dgst, err := digest.FromReader(bytes.NewReader([]byte{})) if err != nil { @@ -277,37 +283,16 @@ func TestLayerUploadZeroLength(t *testing.T) { t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar) } - layer, err := upload.Finish(dgst) + desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) if err != nil { - t.Fatalf("unexpected error finishing upload: %v", err) + t.Fatalf("unexpected error committing write: %v", err) } - if layer.Digest() != dgst { - t.Fatalf("unexpected digest: %v != %v", layer.Digest(), dgst) + if desc.Digest != dgst { + t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst) } } -// writeRandomLayer creates a random layer under name and tarSum using driver -// and pathMapper. An io.ReadSeeker with the data is returned, along with the -// sha256 hex digest. 
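The read-back assertions in these tests follow a stat, open, re-hash shape. Pulled out into an illustrative helper (the interface below is assumed from the calls the test makes, and sha256 is assumed to be the canonical algorithm):

package example

import (
	"crypto/sha256"
	"fmt"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// blobReader captures just the calls the test makes; the blob service in
// this patch satisfies it.
type blobReader interface {
	Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error)
	Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error)
}

// verifyBlob re-reads a stored blob and confirms it still hashes to the
// digest recorded in its descriptor.
func verifyBlob(ctx context.Context, bs blobReader, dgst digest.Digest) error {
	desc, err := bs.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	rc, err := bs.Open(ctx, desc.Digest)
	if err != nil {
		return err
	}
	defer rc.Close()

	h := sha256.New()
	if _, err := io.Copy(h, rc); err != nil {
		return err
	}

	if got := digest.NewDigest("sha256", h); got != desc.Digest {
		return fmt.Errorf("blob corrupted: %v != %v", got, desc.Digest)
	}

	return nil
}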
-func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) { - reader, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - return nil, "", "", err - } - - tarSum = digest.Digest(tarSumStr) - - // Now, actually create the layer. - randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader)) - - if _, err := reader.Seek(0, os.SEEK_SET); err != nil { - return nil, "", "", err - } - - return reader, tarSum, randomLayerDigest, err -} - // seekerSize seeks to the end of seeker, checks the size and returns it to // the original state, returning the size. The state of the seeker should be // treated as unknown if an error is returned. @@ -334,46 +319,20 @@ func seekerSize(seeker io.ReadSeeker) (int64, error) { return end, nil } -// createTestLayer creates a simple test layer in the provided driver under -// tarsum dgst, returning the sha256 digest location. This is implemented -// piecemeal and should probably be replaced by the uploader when it's ready. -func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) { - h := sha256.New() - rd := io.TeeReader(content, h) - - p, err := ioutil.ReadAll(rd) - +// addBlob simply consumes the reader and inserts into the blob service, +// returning a descriptor on success. +func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) { + wr, err := bs.Create(ctx) if err != nil { - return "", nil + return distribution.Descriptor{}, err + } + defer wr.Cancel(ctx) + + if nn, err := io.Copy(wr, rd); err != nil { + return distribution.Descriptor{}, err + } else if nn != desc.Length { + return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Length) } - blobDigestSHA := digest.NewDigest("sha256", h) - - blobPath, err := pathMapper.path(blobDataPathSpec{ - digest: dgst, - }) - - ctx := context.Background() - if err := driver.PutContent(ctx, blobPath, p); err != nil { - return "", err - } - - if err != nil { - return "", err - } - - layerLinkPath, err := pathMapper.path(layerLinkPathSpec{ - name: name, - digest: dgst, - }) - - if err != nil { - return "", err - } - - if err := driver.PutContent(ctx, layerLinkPath, []byte(dgst)); err != nil { - return "", nil - } - - return blobDigestSHA, err + return wr.Commit(ctx, desc) } diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go new file mode 100644 index 00000000..065453e6 --- /dev/null +++ b/docs/storage/blobserver.go @@ -0,0 +1,72 @@ +package storage + +import ( + "fmt" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// TODO(stevvooe): This should configurable in the future. +const blobCacheControlMaxAge = 365 * 24 * time.Hour + +// blobServer simply serves blobs from a driver instance using a path function +// to identify paths and a descriptor service to fill in metadata. 
+type blobServer struct { + driver driver.StorageDriver + statter distribution.BlobStatter + pathFn func(dgst digest.Digest) (string, error) +} + +func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return err + } + + path, err := bs.pathFn(desc.Digest) + if err != nil { + return err + } + + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + + switch err { + case nil: + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + br, err := newFileReader(ctx, bs.driver, path, desc.Length) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", desc.Digest.String()) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. + w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Length)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + } + + // Some unexpected error. + return err +} diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index c0c86929..afe42847 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -1,133 +1,94 @@ package storage import ( - "fmt" - + "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver" ) -// TODO(stevvooe): Currently, the blobStore implementation used by the -// manifest store. The layer store should be refactored to better leverage the -// blobStore, reducing duplicated code. - -// blobStore implements a generalized blob store over a driver, supporting the -// read side and link management. This object is intentionally a leaky -// abstraction, providing utility methods that support creating and traversing -// backend links. +// blobStore implements a the read side of the blob store interface over a +// driver without enforcing per-repository membership. This object is +// intentionally a leaky abstraction, providing utility methods that support +// creating and traversing backend links. type blobStore struct { - driver storagedriver.StorageDriver - pm *pathMapper - ctx context.Context + driver driver.StorageDriver + pm *pathMapper + statter distribution.BlobStatter } -// exists reports whether or not the path exists. If the driver returns error -// other than storagedriver.PathNotFound, an error may be returned. -func (bs *blobStore) exists(dgst digest.Digest) (bool, error) { - path, err := bs.path(dgst) +var _ distribution.BlobProvider = &blobStore{} - if err != nil { - return false, err - } - - ok, err := exists(bs.ctx, bs.driver, path) - if err != nil { - return false, err - } - - return ok, nil -} - -// get retrieves the blob by digest, returning it a byte slice. This should -// only be used for small objects. 
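The one-year blobCacheControlMaxAge above is what changed the expected header in api_test.go from max-age=86400 to max-age=31536000: blobs are content-addressed and immutable, so long-lived caching is safe. The arithmetic, checked standalone:

package main

import (
	"fmt"
	"time"
)

func main() {
	const blobCacheControlMaxAge = 365 * 24 * time.Hour

	// Matches the updated expectation in api_test.go: max-age=31536000.
	fmt.Printf("max-age=%.f\n", blobCacheControlMaxAge.Seconds())
}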
-func (bs *blobStore) get(dgst digest.Digest) ([]byte, error) { +// Get implements the BlobReadService.Get call. +func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { bp, err := bs.path(dgst) if err != nil { return nil, err } - return bs.driver.GetContent(bs.ctx, bp) -} + p, err := bs.driver.GetContent(ctx, bp) + if err != nil { + switch err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUnknown + } -// link links the path to the provided digest by writing the digest into the -// target file. -func (bs *blobStore) link(path string, dgst digest.Digest) error { - if exists, err := bs.exists(dgst); err != nil { - return err - } else if !exists { - return fmt.Errorf("cannot link non-existent blob") + return nil, err } - // The contents of the "link" file are the exact string contents of the - // digest, which is specified in that package. - return bs.driver.PutContent(bs.ctx, path, []byte(dgst)) + return p, err } -// linked reads the link at path and returns the content. -func (bs *blobStore) linked(path string) ([]byte, error) { - linked, err := bs.readlink(path) +func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + desc, err := bs.statter.Stat(ctx, dgst) if err != nil { return nil, err } - return bs.get(linked) + path, err := bs.path(desc.Digest) + if err != nil { + return nil, err + } + + return newFileReader(ctx, bs.driver, path, desc.Length) } -// readlink returns the linked digest at path. -func (bs *blobStore) readlink(path string) (digest.Digest, error) { - content, err := bs.driver.GetContent(bs.ctx, path) - if err != nil { - return "", err - } - - linked, err := digest.ParseDigest(string(content)) - if err != nil { - return "", err - } - - if exists, err := bs.exists(linked); err != nil { - return "", err - } else if !exists { - return "", fmt.Errorf("link %q invalid: blob %s does not exist", path, linked) - } - - return linked, nil -} - -// resolve reads the digest link at path and returns the blob store link. -func (bs *blobStore) resolve(path string) (string, error) { - dgst, err := bs.readlink(path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - -// put stores the content p in the blob store, calculating the digest. If the +// Put stores the content p in the blob store, calculating the digest. If the // content is already present, only the digest will be returned. This should -// only be used for small objects, such as manifests. -func (bs *blobStore) put(p []byte) (digest.Digest, error) { +// only be used for small objects, such as manifests. 
+// This is implemented as a convenience for other Put implementations.
+func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
 	dgst, err := digest.FromBytes(p)
 	if err != nil {
-		context.GetLogger(bs.ctx).Errorf("error digesting content: %v, %s", err, string(p))
-		return "", err
+		context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p))
+		return distribution.Descriptor{}, err
+	}
+
+	desc, err := bs.statter.Stat(ctx, dgst)
+	if err == nil {
+		// content already present
+		return desc, nil
+	} else if err != distribution.ErrBlobUnknown {
+		context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err)
+		// real error, return it
+		return distribution.Descriptor{}, err
 	}
 
 	bp, err := bs.path(dgst)
 	if err != nil {
-		return "", err
+		return distribution.Descriptor{}, err
 	}
 
-	// If the content already exists, just return the digest.
-	if exists, err := bs.exists(dgst); err != nil {
-		return "", err
-	} else if exists {
-		return dgst, nil
-	}
+	// TODO(stevvooe): Write out mediatype here, as well.
 
-	return dgst, bs.driver.PutContent(bs.ctx, bp, p)
+	return distribution.Descriptor{
+		Length: int64(len(p)),
+
+		// NOTE(stevvooe): The central blob store firewalls media types from
+		// other users. The caller should look this up and override the value
+		// for the specific repository.
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, bs.driver.PutContent(ctx, bp, p)
 }
 
 // path returns the canonical path for the blob identified by digest. The blob
@@ -144,16 +105,86 @@ func (bs *blobStore) path(dgst digest.Digest) (string, error) {
 	return bp, nil
 }
 
-// exists provides a utility method to test whether or not a path exists
-func exists(ctx context.Context, driver storagedriver.StorageDriver, path string) (bool, error) {
-	if _, err := driver.Stat(ctx, path); err != nil {
+// link links the path to the provided digest by writing the digest into the
+// target file. Caller must ensure that the blob actually exists.
+func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
+	// The contents of the "link" file are the exact string contents of the
+	// digest, which is specified in that package.
+	return bs.driver.PutContent(ctx, path, []byte(dgst))
+}
+
+// readlink returns the linked digest at path.
+func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
+	content, err := bs.driver.GetContent(ctx, path)
+	if err != nil {
+		return "", err
+	}
+
+	linked, err := digest.ParseDigest(string(content))
+	if err != nil {
+		return "", err
+	}
+
+	return linked, nil
+}
+
+// resolve reads the digest link at path and returns the blob store path.
+func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
+	dgst, err := bs.readlink(ctx, path)
+	if err != nil {
+		return "", err
+	}
+
+	return bs.path(dgst)
+}
+
+type blobStatter struct {
+	driver driver.StorageDriver
+	pm     *pathMapper
+}
+
+var _ distribution.BlobStatter = &blobStatter{}
+
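Stat is also what makes Put above idempotent: content addressing means a successful Stat can short-circuit the write entirely. That stat-first shape, isolated as an illustrative sketch (the write callback stands in for the driver call, and is not a name from the patch):

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// putIfAbsent sketches Put's structure: write runs only when the blob is
// genuinely unknown; any error other than "unknown" propagates.
func putIfAbsent(ctx context.Context, statter distribution.BlobStatter, dgst digest.Digest, write func() (distribution.Descriptor, error)) (distribution.Descriptor, error) {
	desc, err := statter.Stat(ctx, dgst)
	if err == nil {
		// Content already present; re-writing the same bytes at the same
		// content-addressed path would be a no-op.
		return desc, nil
	}

	if err != distribution.ErrBlobUnknown {
		// A real error, distinct from "not found": propagate it.
		return distribution.Descriptor{}, err
	}

	return write()
}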
+// Stat implements BlobStatter.Stat by returning the descriptor for the blob
+// in the main blob store. If this method returns successfully, there is a
+// strong guarantee that the blob exists and is available.
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	path, err := bs.pm.path(blobDataPathSpec{
+		digest: dgst,
+	})
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	fi, err := bs.driver.Stat(ctx, path)
+	if err != nil {
 		switch err := err.(type) {
-		case storagedriver.PathNotFoundError:
-			return false, nil
+		case driver.PathNotFoundError:
+			return distribution.Descriptor{}, distribution.ErrBlobUnknown
 		default:
-			return false, err
+			return distribution.Descriptor{}, err
 		}
 	}
 
-	return true, nil
+	if fi.IsDir() {
+		// NOTE(stevvooe): This represents a corruption situation. Somehow, we
+		// calculated a blob path and then detected a directory. We log the
+		// error and then error on the side of not knowing about the blob.
+		context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+
+	// TODO(stevvooe): Add method to resolve the mediatype. We can store and
+	// cache a "global" media type for the blob, even if a specific repo has a
+	// mediatype that overrides the main one.
+
+	return distribution.Descriptor{
+		Length: fi.Size(),
+
+		// NOTE(stevvooe): The central blob store firewalls media types from
+		// other users. The caller should look this up and override the value
+		// for the specific repository.
+		MediaType: "application/octet-stream",
+		Digest:    dgst,
+	}, nil
 }
diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go
new file mode 100644
index 00000000..a9a625b6
--- /dev/null
+++ b/docs/storage/blobwriter.go
@@ -0,0 +1,469 @@
+package storage
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"strconv"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+)
+
+// blobWriter is used to control the various aspects of resumable
+// blob upload. It implements the BlobWriter interface.
+type blobWriter struct {
+	blobStore *linkedBlobStore
+
+	id                string
+	startedAt         time.Time
+	resumableDigester digest.ResumableDigester
+
+	// implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy
+	// the BlobWriter interface
+	bufferedFileWriter
+}
+
+var _ distribution.BlobWriter = &blobWriter{}
+
+// ID returns the identifier for this upload.
+func (bw *blobWriter) ID() string {
+	return bw.id
+}
+
+func (bw *blobWriter) StartedAt() time.Time {
+	return bw.startedAt
+}
+
+// Commit marks the upload as completed, returning a valid descriptor. The
+// final size and digest are checked against the first descriptor provided.
+
+// Commit marks the upload as completed, returning a valid descriptor. The
+// final size and digest are checked against the first descriptor provided.
+func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	context.GetLogger(ctx).Debug("(*blobWriter).Commit")
+
+	if err := bw.bufferedFileWriter.Close(); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	canonical, err := bw.validateBlob(ctx, desc)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.moveBlob(ctx, canonical); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if err := bw.removeResources(ctx); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	return canonical, nil
+}
+
+// Cancel cancels the blob upload process, releasing any resources associated
+// with the writer.
+func (bw *blobWriter) Cancel(ctx context.Context) error {
+	context.GetLogger(ctx).Debug("(*blobWriter).Cancel")
+	if err := bw.removeResources(ctx); err != nil {
+		return err
+	}
+
+	bw.Close()
+	return nil
+}
+
+func (bw *blobWriter) Write(p []byte) (int, error) {
+	if bw.resumableDigester == nil {
+		return bw.bufferedFileWriter.Write(p)
+	}
+
+	// Ensure that the current write offset matches how many bytes have been
+	// written to the digester. If not, we need to update the digest state to
+	// match the current write position.
+	if err := bw.resumeHashAt(bw.blobStore.ctx, bw.offset); err != nil {
+		return 0, err
+	}
+
+	return io.MultiWriter(&bw.bufferedFileWriter, bw.resumableDigester).Write(p)
+}
+
+func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
+	if bw.resumableDigester == nil {
+		return bw.bufferedFileWriter.ReadFrom(r)
+	}
+
+	// Ensure that the current write offset matches how many bytes have been
+	// written to the digester. If not, we need to update the digest state to
+	// match the current write position.
+	if err := bw.resumeHashAt(bw.blobStore.ctx, bw.offset); err != nil {
+		return 0, err
+	}
+
+	return bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.resumableDigester))
+}
+
+func (bw *blobWriter) Close() error {
+	if bw.err != nil {
+		return bw.err
+	}
+
+	if bw.resumableDigester != nil {
+		if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
+			return err
+		}
+	}
+
+	return bw.bufferedFileWriter.Close()
+}
+
+// validateBlob checks the data against the digest, returning an error if it
+// does not match. The canonical descriptor is returned.
+func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	var (
+		verified, fullHash bool
+		canonical          digest.Digest
+	)
+
+	if desc.Digest == "" {
+		// if no digest is provided, we have nothing to validate
+		// against. We don't really want to support this for the registry.
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Reason: fmt.Errorf("cannot validate against empty digest"),
+		}
+	}
+
+	// Stat the on-disk file.
+	if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// NOTE(stevvooe): We really don't care if the file is
+			// not actually present for the reader. We now assume
+			// that the desc length is zero.
+			desc.Length = 0
+		default:
+			// Any other error we want propagated up the stack.
+			return distribution.Descriptor{}, err
+		}
+	} else {
+		if fi.IsDir() {
+			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
+		}
+
+		bw.size = fi.Size()
+	}
+
+	if desc.Length > 0 {
+		if desc.Length != bw.size {
+			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
+		}
+	} else {
+		// if provided a zero or negative length, we can assume the caller
+		// doesn't know or care about the length.
+		desc.Length = bw.size
+	}
+
+	if bw.resumableDigester != nil {
+		// Restore the hasher state to the end of the upload.
+		if err := bw.resumeHashAt(ctx, bw.size); err != nil {
+			return distribution.Descriptor{}, err
+		}
+
+		canonical = bw.resumableDigester.Digest()
+
+		if canonical.Algorithm() == desc.Digest.Algorithm() {
+			// Common case: client and server prefer the same canonical digest
+			// algorithm - currently SHA256.
+			verified = desc.Digest == canonical
+		} else {
+			// The client wants to use a different digest algorithm. They'll
+			// just have to be patient and wait for us to read back and re-hash
+			// the uploaded content using that digest algorithm.
+			fullHash = true
+		}
+	} else {
+		// Not using resumable digests, so we need to hash the entire layer.
+		fullHash = true
+	}
+
+	if fullHash {
+		digester := digest.NewCanonicalDigester()
+
+		digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+
+		// Read the file from the backend driver and validate it.
+		fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Length)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+
+		tr := io.TeeReader(fr, digester)
+
+		if _, err := io.Copy(digestVerifier, tr); err != nil {
+			return distribution.Descriptor{}, err
+		}
+
+		canonical = digester.Digest()
+		verified = digestVerifier.Verified()
+	}
+
+	if !verified {
+		context.GetLoggerWithFields(ctx,
+			map[string]interface{}{
+				"canonical": canonical,
+				"provided":  desc.Digest,
+			}, "canonical", "provided").
+			Errorf("canonical digest does not match provided digest")
+		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
+			Digest: desc.Digest,
+			Reason: fmt.Errorf("content does not match digest"),
+		}
+	}
+
+	// update desc with canonical hash
+	desc.Digest = canonical
+
+	if desc.MediaType == "" {
+		desc.MediaType = "application/octet-stream"
+	}
+
+	return desc, nil
+}
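The full-hash fallback above is the plain TeeReader pattern: a single pass feeds both the canonical digester and the verifier for the client's digest. Condensed into one helper, mirroring the calls used in validateBlob (assume this lives in the storage package, with the same io and digest imports as the file above):

    // Condensed from validateBlob above; content is any io.Reader over the upload.
    func verifyAndCanonicalize(content io.Reader, provided digest.Digest) (digest.Digest, bool, error) {
    	digester := digest.NewCanonicalDigester()

    	digestVerifier, err := digest.NewDigestVerifier(provided)
    	if err != nil {
    		return "", false, err
    	}

    	// Every byte read by the verifier is tee'd into the canonical digester,
    	// so one pass yields both the canonical digest and the match result.
    	if _, err := io.Copy(digestVerifier, io.TeeReader(content, digester)); err != nil {
    		return "", false, err
    	}

    	return digester.Digest(), digestVerifier.Verified(), nil
    }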
+
+// moveBlob moves the data into its final, hash-qualified destination,
+// identified by dgst. The layer should be validated before commencing the
+// move.
+func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
+	blobPath, err := bw.blobStore.pm.path(blobDataPathSpec{
+		digest: desc.Digest,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Check for existence
+	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // ensure that it doesn't exist.
+		default:
+			return err
+		}
+	} else {
+		// If the path exists, we can assume that the content has already
+		// been uploaded, since the blob storage is content-addressable.
+		// While it may be corrupted, detection of such corruption belongs
+		// elsewhere.
+		return nil
+	}
+
+	// If no data was received, we may not actually have a file on disk. Check
+	// the size here and write a zero-length file to blobPath if this is the
+	// case. For the most part, this should only ever happen with zero-length
+	// tars.
+	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			// HACK(stevvooe): This is slightly dangerous: if we verify above,
+			// get a hash, then the underlying file is deleted, we risk moving
+			// a zero-length blob into a nonzero-length blob location. To
+			// prevent this horrid thing, we employ the hack of only allowing
+			// this to happen for the zero tarsum.
+			if desc.Digest == digest.DigestSha256EmptyTar {
+				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
+			}
+
+			// We let this fail during the move below.
+			logrus.
+				WithField("upload.id", bw.ID()).
+				WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
+		default:
+			return err // unrelated error
+		}
+	}
+
+	// TODO(stevvooe): We should also write the mediatype when executing this move.
+
+	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
+}
+
+type hashStateEntry struct {
+	offset int64
+	path   string
+}
+
+// getStoredHashStates returns a slice of hashStateEntries for this upload.
+func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
+	uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
+		name: bw.blobStore.repository.Name(),
+		id:   bw.id,
+		alg:  bw.resumableDigester.Digest().Algorithm(),
+		list: true,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
+	if err != nil {
+		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
+			return nil, err
+		}
+		// Treat PathNotFoundError as no entries.
+		paths = nil
+	}
+
+	hashStateEntries := make([]hashStateEntry, 0, len(paths))
+
+	for _, p := range paths {
+		pathSuffix := path.Base(p)
+		// The suffix should be the offset.
+		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
+		if err != nil {
+			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
+		}
+
+		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
+	}
+
+	return hashStateEntries, nil
+}
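Those per-offset hash-state files are what make uploads resumable: serialize the digester at the current offset, restore it on the next request, and re-hash only the gap. The same idea in miniature with the standard library, which since Go 1.17 can marshal SHA-256 midstream state; the patch predates that and ships its own ResumableDigester:

    package main

    import (
    	"crypto/sha256"
    	"encoding"
    	"fmt"
    )

    func main() {
    	h := sha256.New()
    	h.Write([]byte("hello, ")) // first upload request hashes 7 bytes

    	// Persist the midstream state, as storeHashState does with PutContent.
    	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
    	if err != nil {
    		panic(err)
    	}

    	// A later request restores the state instead of re-hashing from zero.
    	h2 := sha256.New()
    	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
    		panic(err)
    	}
    	h2.Write([]byte("world")) // continue at offset 7

    	fmt.Printf("%x\n", h2.Sum(nil)) // same as sha256.Sum256([]byte("hello, world"))
    }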
+
+// resumeHashAt attempts to restore the state of the internal hash function
+// by loading the most recent saved hash state less than or equal to the given
+// offset. Any bytes between the restored state's offset and the requested
+// offset are then hashed from the content uploaded so far.
+func (bw *blobWriter) resumeHashAt(ctx context.Context, offset int64) error {
+	if offset < 0 {
+		return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
+	}
+
+	if offset == int64(bw.resumableDigester.Len()) {
+		// State of digester is already at the requested offset.
+		return nil
+	}
+
+	// List hash states from storage backend.
+	var hashStateMatch hashStateEntry
+	hashStates, err := bw.getStoredHashStates(ctx)
+	if err != nil {
+		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
+	}
+
+	// Find the highest stored hashState with offset less than or equal to
+	// the requested offset.
+	for _, hashState := range hashStates {
+		if hashState.offset == offset {
+			hashStateMatch = hashState
+			break // Found an exact offset match.
+		} else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
+			// This offset is closer to the requested offset.
+			hashStateMatch = hashState
+		} else if hashState.offset > offset {
+			// Remove any stored hash state with offsets higher than this one
+			// as writes to this resumed hasher will make those invalid. This
+			// is probably okay to skip for now since we don't expect anyone to
+			// use the API in this way. For that reason, we don't treat an
+			// error here as fatal, but only log it.
+			if err := bw.driver.Delete(ctx, hashState.path); err != nil {
+				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
+			}
+		}
+	}
+
+	if hashStateMatch.offset == 0 {
+		// No need to load any state, just reset the hasher.
+		bw.resumableDigester.Reset()
+	} else {
+		storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
+		if err != nil {
+			return err
+		}
+
+		if err = bw.resumableDigester.Restore(storedState); err != nil {
+			return err
+		}
+	}
+
+	// Mind the gap.
+	if gapLen := offset - int64(bw.resumableDigester.Len()); gapLen > 0 {
+		// Need to read content from the upload to catch up to the desired offset.
+		fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size)
+		if err != nil {
+			return err
+		}
+
+		if _, err = fr.Seek(int64(bw.resumableDigester.Len()), os.SEEK_SET); err != nil {
+			return fmt.Errorf("unable to seek to layer reader offset %d: %s", bw.resumableDigester.Len(), err)
+		}
+
+		if _, err := io.CopyN(bw.resumableDigester, fr, gapLen); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (bw *blobWriter) storeHashState(ctx context.Context) error {
+	uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
+		name:   bw.blobStore.repository.Name(),
+		id:     bw.id,
+		alg:    bw.resumableDigester.Digest().Algorithm(),
+		offset: int64(bw.resumableDigester.Len()),
+	})
+	if err != nil {
+		return err
+	}
+
+	hashState, err := bw.resumableDigester.State()
+	if err != nil {
+		return err
+	}
+
+	return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
+}
+
+// removeResources should clean up all resources associated with the upload
+// instance. An error will be returned if the clean up cannot proceed. If the
+// resources are already not present, no error will be returned.
+func (bw *blobWriter) removeResources(ctx context.Context) error {
+	dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{
+		name: bw.blobStore.repository.Name(),
+		id:   bw.id,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Resolve and delete the containing directory, which should include any
+	// upload related files.
+	dirPath := path.Dir(dataPath)
+	if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // already gone!
+		default:
+			// This should be uncommon enough that returning an error
+			// should be okay. At this point, the upload should be mostly
+			// complete, but perhaps the backend became unreachable.
+ context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + return err + } + } + + return nil +} diff --git a/docs/storage/blobwriter_nonresumable.go b/docs/storage/blobwriter_nonresumable.go new file mode 100644 index 00000000..ac2d7877 --- /dev/null +++ b/docs/storage/blobwriter_nonresumable.go @@ -0,0 +1,6 @@ +// +build noresumabledigest + +package storage + +func (bw *blobWriter) setupResumableDigester() { +} diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go new file mode 100644 index 00000000..f20a6c36 --- /dev/null +++ b/docs/storage/blobwriter_resumable.go @@ -0,0 +1,9 @@ +// +build !noresumabledigest + +package storage + +import "github.com/docker/distribution/digest" + +func (bw *blobWriter) setupResumableDigester() { + bw.resumableDigester = digest.NewCanonicalResumableDigester() +} diff --git a/docs/storage/cache/cache.go b/docs/storage/cache/cache.go index a21cefd5..e7471c27 100644 --- a/docs/storage/cache/cache.go +++ b/docs/storage/cache/cache.go @@ -1,98 +1,38 @@ // Package cache provides facilities to speed up access to the storage -// backend. Typically cache implementations deal with internal implementation -// details at the backend level, rather than generalized caches for -// distribution related interfaces. In other words, unless the cache is -// specific to the storage package, it belongs in another package. +// backend. package cache import ( "fmt" + "github.com/docker/distribution" "github.com/docker/distribution/digest" - "golang.org/x/net/context" ) -// ErrNotFound is returned when a meta item is not found. -var ErrNotFound = fmt.Errorf("not found") +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService -// LayerMeta describes the backend location and length of layer data. -type LayerMeta struct { - Path string - Length int64 + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) } -// LayerInfoCache is a driver-aware cache of layer metadata. Basically, it -// provides a fast cache for checks against repository metadata, avoiding -// round trips to backend storage. Note that this is different from a pure -// layer cache, which would also provide access to backing data, as well. Such -// a cache should be implemented as a middleware, rather than integrated with -// the storage backend. -// -// Note that most implementations rely on the caller to do strict checks on on -// repo and dgst arguments, since these are mostly used behind existing -// implementations. -type LayerInfoCache interface { - // Contains returns true if the repository with name contains the layer. - Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) - - // Add includes the layer in the given repository cache. - Add(ctx context.Context, repo string, dgst digest.Digest) error - - // Meta provides the location of the layer on the backend and its size. Membership of a - // repository should be tested before using the result, if required. - Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) - - // SetMeta sets the meta data for the given layer. - SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error +func validateDigest(dgst digest.Digest) error { + return dgst.Validate() } -// base implements common checks between cache implementations. 
Note that -// these are not full checks of input, since that should be done by the -// caller. -type base struct { - LayerInfoCache -} - -func (b *base) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - if repo == "" { - return false, fmt.Errorf("cache: cannot check for empty repository name") - } - - if dgst == "" { - return false, fmt.Errorf("cache: cannot check for empty digests") - } - - return b.LayerInfoCache.Contains(ctx, repo, dgst) -} - -func (b *base) Add(ctx context.Context, repo string, dgst digest.Digest) error { - if repo == "" { - return fmt.Errorf("cache: cannot add empty repository name") - } - - if dgst == "" { - return fmt.Errorf("cache: cannot add empty digest") - } - - return b.LayerInfoCache.Add(ctx, repo, dgst) -} - -func (b *base) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - if dgst == "" { - return LayerMeta{}, fmt.Errorf("cache: cannot get meta for empty digest") - } - - return b.LayerInfoCache.Meta(ctx, dgst) -} - -func (b *base) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - if dgst == "" { - return fmt.Errorf("cache: cannot set meta for empty digest") - } - - if meta.Path == "" { - return fmt.Errorf("cache: cannot set empty path for meta") - } - - return b.LayerInfoCache.SetMeta(ctx, dgst, meta) +func validateDescriptor(desc distribution.Descriptor) error { + if err := validateDigest(desc.Digest); err != nil { + return err + } + + if desc.Length < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Length) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil } diff --git a/docs/storage/cache/cache_test.go b/docs/storage/cache/cache_test.go index 48cef955..e923367a 100644 --- a/docs/storage/cache/cache_test.go +++ b/docs/storage/cache/cache_test.go @@ -3,84 +3,139 @@ package cache import ( "testing" - "golang.org/x/net/context" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" ) -// checkLayerInfoCache takes a cache implementation through a common set of -// operations. If adding new tests, please add them here so new +// checkBlobDescriptorCache takes a cache implementation through a common set +// of operations. If adding new tests, please add them here so new // implementations get the benefit. 
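The provider contract is small: repository-scoped BlobDescriptorService instances whose writes also populate the global cache, which keeps its own canonical mediatype. A hedged sketch of that contract, written as if inside this package (the digest and length values are illustrative); the shared checker below exercises exactly this behavior:

    // Hedged sketch: populate is not in the patch; it shows the intended flow.
    func populate(ctx context.Context, provider BlobDescriptorCacheProvider) error {
    	scoped, err := provider.RepositoryScoped("foo/bar")
    	if err != nil {
    		return err
    	}

    	desc := distribution.Descriptor{
    		Digest:    "sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c",
    		Length:    12,
    		MediaType: "application/octet-stream",
    	}

    	// A repository-scoped write...
    	if err := scoped.SetDescriptor(ctx, desc.Digest, desc); err != nil {
    		return err
    	}

    	// ...is also visible through the global provider afterwards.
    	_, err = provider.Stat(ctx, desc.Digest)
    	return err
    }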
-func checkLayerInfoCache(t *testing.T, lic LayerInfoCache) { +func checkBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) { ctx := context.Background() - exists, err := lic.Contains(ctx, "", "fake:abc") + checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) + checkBlobDescriptorCacheSetAndRead(t, ctx, provider) +} + +func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { + if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty store: %v", err) + } + + cache, err := provider.RepositoryScoped("") if err == nil { - t.Fatalf("expected error checking for cache item with empty repo") + t.Fatalf("expected an error when asking for invalid repo") } - exists, err = lic.Contains(ctx, "foo/bar", "") - if err == nil { - t.Fatalf("expected error checking for cache item with empty digest") - } - - exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") + cache, err = provider.RepositoryScoped("foo/bar") if err != nil { - t.Fatalf("unexpected error checking for cache item: %v", err) + t.Fatalf("unexpected error getting repository: %v", err) } - if exists { - t.Fatalf("item should not exist") + if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ + Digest: "sha384:abc", + Length: 10, + MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error with invalid digest: %v", err) } - if err := lic.Add(ctx, "", "fake:abc"); err == nil { - t.Fatalf("expected error adding cache item with empty name") + if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{ + Digest: "", + Length: 10, + MediaType: "application/octet-stream"}); err == nil { + t.Fatalf("expected error setting value on invalid descriptor") } - if err := lic.Add(ctx, "foo/bar", ""); err == nil { - t.Fatalf("expected error adding cache item with empty digest") + if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { + t.Fatalf("expected error checking for cache item with empty digest: %v", err) } - if err := lic.Add(ctx, "foo/bar", "fake:abc"); err != nil { - t.Fatalf("unexpected error adding item: %v", err) - } - - exists, err = lic.Contains(ctx, "foo/bar", "fake:abc") - if err != nil { - t.Fatalf("unexpected error checking for cache item: %v", err) - } - - if !exists { - t.Fatalf("item should exist") - } - - _, err = lic.Meta(ctx, "") - if err == nil || err == ErrNotFound { - t.Fatalf("expected error getting meta for cache item with empty digest") - } - - _, err = lic.Meta(ctx, "fake:abc") - if err != ErrNotFound { - t.Fatalf("expected unknown layer error getting meta for cache item with empty digest") - } - - if err = lic.SetMeta(ctx, "", LayerMeta{}); err == nil { - t.Fatalf("expected error setting meta for cache item with empty digest") - } - - if err = lic.SetMeta(ctx, "foo/bar", LayerMeta{}); err == nil { - t.Fatalf("expected error setting meta for cache item with empty meta") - } - - expected := LayerMeta{Path: "/foo/bar", Length: 20} - if err := lic.SetMeta(ctx, "foo/bar", expected); err != nil { - t.Fatalf("unexpected error setting meta: %v", err) - } - - meta, err := lic.Meta(ctx, "foo/bar") - if err != nil { - t.Fatalf("unexpected error getting meta: %v", err) - } - - if meta != expected { - t.Fatalf("retrieved meta data did not match: %v", err) + if _, err := cache.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown { + t.Fatalf("expected unknown blob error with empty 
repo: %v", err) + } +} + +func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc") + expected := distribution.Descriptor{ + Digest: "sha256:abc", + Length: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // also check that we set the canonical key ("fake:abc") + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("descriptor not returned for canonical key: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // ensure that global gets extra descriptor mapping + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // get at it through canonical descriptor + desc, err = provider.Stat(ctx, expected.Digest) + if err != nil { + t.Fatalf("unexpected error checking glboal descriptor: %v", err) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + // now, we set the repo local mediatype to something else and ensure it + // doesn't get changed in the provider cache. + expected.MediaType = "application/json" + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("unexpected error setting descriptor: %v", err) + } + + desc, err = cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting descriptor: %v", err) + } + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) + } + + desc, err = provider.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error getting global descriptor: %v", err) + } + + expected.MediaType = "application/octet-stream" // expect original mediatype in global + + if desc != expected { + t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) } } diff --git a/docs/storage/cache/memory.go b/docs/storage/cache/memory.go index 6d949792..40ab0d94 100644 --- a/docs/storage/cache/memory.go +++ b/docs/storage/cache/memory.go @@ -1,63 +1,149 @@ package cache import ( + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "golang.org/x/net/context" + "github.com/docker/distribution/registry/api/v2" ) -// inmemoryLayerInfoCache is a map-based implementation of LayerInfoCache. -type inmemoryLayerInfoCache struct { - membership map[string]map[digest.Digest]struct{} - meta map[digest.Digest]LayerMeta +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex } -// NewInMemoryLayerInfoCache provides an implementation of LayerInfoCache that -// stores results in memory. 
-func NewInMemoryLayerInfoCache() LayerInfoCache { - return &base{&inmemoryLayerInfoCache{ - membership: make(map[string]map[digest.Digest]struct{}), - meta: make(map[digest.Digest]LayerMeta), - }} +// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for +// storing blob descriptor data. +func NewInMemoryBlobDescriptorCacheProvider() BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } } -func (ilic *inmemoryLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) { - members, ok := ilic.membership[repo] - if !ok { - return false, nil +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if err := v2.ValidateRespositoryName(repo); err != nil { + return nil, err } - _, ok = members[dgst] - return ok, nil + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil } -// Add adds the layer to the redis repository blob set. -func (ilic *inmemoryLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { - members, ok := ilic.membership[repo] - if !ok { - members = make(map[digest.Digest]struct{}) - ilic.membership[repo] = members +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) } - members[dgst] = struct{}{} + // we already know it, do nothing + return err +} - return nil -} - -// Meta retrieves the layer meta data from the redis hash, returning -// ErrUnknownLayer if not found. -func (ilic *inmemoryLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - meta, ok := ilic.meta[dgst] - if !ok { - return LayerMeta{}, ErrNotFound - } - - return meta, nil -} - -// SetMeta sets the meta data for the given digest using a redis hash. A hash -// is used here since we may store unrelated fields about a layer in the -// future. -func (ilic *inmemoryLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - ilic.meta[dgst] = meta +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. 
+type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if rsimbdcp.repository == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if rsimbdcp.repository == nil { + // allocate map since we are setting it now. + rsimbdcp.parent.mu.Lock() + var ok bool + // have to read back value since we may have allocated elsewhere. + rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + rsimbdcp.repository = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository + } + + rsimbdcp.parent.mu.Unlock() + } + + if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. +type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := validateDigest(dgst); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := validateDigest(dgst); err != nil { + return err + } + + if err := validateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc return nil } diff --git a/docs/storage/cache/memory_test.go b/docs/storage/cache/memory_test.go index 417e982e..9f2ce460 100644 --- a/docs/storage/cache/memory_test.go +++ b/docs/storage/cache/memory_test.go @@ -2,8 +2,8 @@ package cache import "testing" -// TestInMemoryLayerInfoCache checks the in memory implementation is working +// TestInMemoryBlobInfoCache checks the in memory implementation is working // correctly. 
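One subtlety worth calling out before the renamed in-memory test: SetDescriptor above allocates the repository map lazily under the parent lock and re-reads it first, in case another goroutine already allocated it. The pattern condensed into a hypothetical helper (getOrCreate is not in the patch):

    // Condensed check-then-allocate-under-lock pattern from SetDescriptor above.
    func (p *inMemoryBlobDescriptorCacheProvider) getOrCreate(repo string) *mapBlobDescriptorCache {
    	p.mu.Lock()
    	defer p.mu.Unlock()

    	c, ok := p.repositories[repo]
    	if !ok {
    		// Allocate once; later callers re-read the same instance.
    		c = newMapBlobDescriptorCache()
    		p.repositories[repo] = c
    	}
    	return c
    }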
-func TestInMemoryLayerInfoCache(t *testing.T) {
-	checkLayerInfoCache(t, NewInMemoryLayerInfoCache())
+func TestInMemoryBlobInfoCache(t *testing.T) {
+	checkBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider())
 }
diff --git a/docs/storage/cache/redis.go b/docs/storage/cache/redis.go
index 6b8f7679..c0e542bc 100644
--- a/docs/storage/cache/redis.go
+++ b/docs/storage/cache/redis.go
@@ -1,20 +1,28 @@
 package cache

 import (
-	ctxu "github.com/docker/distribution/context"
+	"fmt"
+
+	"github.com/docker/distribution/registry/api/v2"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	"github.com/garyburd/redigo/redis"
-	"golang.org/x/net/context"
 )

-// redisLayerInfoCache provides an implementation of storage.LayerInfoCache
-// based on redis. Layer info is stored in two parts. The first provide fast
-// access to repository membership through a redis set for each repo. The
-// second is a redis hash keyed by the digest of the layer, providing path and
-// length information. Note that there is no implied relationship between
-// these two caches. The layer may exist in one, both or none and the code
-// must be written this way.
-type redisLayerInfoCache struct {
+// redisBlobDescriptorService provides an implementation of
+// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in
+// two parts. The first provides fast access to repository membership through
+// a redis set for each repo. The second is a redis hash keyed by the digest
+// of the layer, providing digest, length and mediatype information. There is
+// also a per-repository redis hash of the blob descriptor, allowing override
+// of data. This is currently used to override the mediatype on a
+// per-repository basis.
+//
+// Note that there is no implied relationship between these two caches. The
+// layer may exist in one, both or none and the code must be written this way.
+type redisBlobDescriptorService struct {
 	pool *redis.Pool

 	// TODO(stevvooe): We use a pool because we don't have great control over
@@ -23,76 +31,194 @@ type redisLayerInfoCache struct {
 	// request objects, we can change this to a connection.
 }

-// NewRedisLayerInfoCache returns a new redis-based LayerInfoCache using the
-// provided redis connection pool.
-func NewRedisLayerInfoCache(pool *redis.Pool) LayerInfoCache {
-	return &base{&redisLayerInfoCache{
+var _ BlobDescriptorCacheProvider = &redisBlobDescriptorService{}
+
+// NewRedisBlobDescriptorCacheProvider returns a new redis-based
+// BlobDescriptorCacheProvider using the provided redis connection pool.
+func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) BlobDescriptorCacheProvider {
+	return &redisBlobDescriptorService{
 		pool: pool,
-	}}
+	}
 }

-// Contains does a membership check on the repository blob set in redis. This
-// is used as an access check before looking up global path information. If
-// false is returned, the caller should still check the backend to if it
-// exists elsewhere.
-func (rlic *redisLayerInfoCache) Contains(ctx context.Context, repo string, dgst digest.Digest) (bool, error) {
-	conn := rlic.pool.Get()
-	defer conn.Close()
+// RepositoryScoped returns the scoped cache.
+func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if err := v2.ValidateRespositoryName(repo); err != nil { + return nil, err + } - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Contains(%q, %q)", repo, dgst) - return redis.Bool(conn.Do("SISMEMBER", rlic.repositoryBlobSetKey(repo), dgst)) + return &repositoryScopedRedisBlobDescriptorService{ + repo: repo, + upstream: rbds, + }, nil } -// Add adds the layer to the redis repository blob set. -func (rlic *redisLayerInfoCache) Add(ctx context.Context, repo string, dgst digest.Digest) error { - conn := rlic.pool.Get() +// Stat retrieves the descriptor data from the redis hash entry. +func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := validateDigest(dgst); err != nil { + return distribution.Descriptor{}, err + } + + conn := rbds.pool.Get() defer conn.Close() - ctxu.GetLogger(ctx).Debugf("(*redisLayerInfoCache).Add(%q, %q)", repo, dgst) - _, err := conn.Do("SADD", rlic.repositoryBlobSetKey(repo), dgst) - return err + return rbds.stat(ctx, conn, dgst) } -// Meta retrieves the layer meta data from the redis hash, returning -// ErrUnknownLayer if not found. -func (rlic *redisLayerInfoCache) Meta(ctx context.Context, dgst digest.Digest) (LayerMeta, error) { - conn := rlic.pool.Get() - defer conn.Close() - - reply, err := redis.Values(conn.Do("HMGET", rlic.blobMetaHashKey(dgst), "path", "length")) +// stat provides an internal stat call that takes a connection parameter. This +// allows some internal management of the connection scope. +func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype")) if err != nil { - return LayerMeta{}, err + return distribution.Descriptor{}, err } - if len(reply) < 2 || reply[0] == nil || reply[1] == nil { - return LayerMeta{}, ErrNotFound + if len(reply) < 2 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + return distribution.Descriptor{}, distribution.ErrBlobUnknown } - var meta LayerMeta - if _, err := redis.Scan(reply, &meta.Path, &meta.Length); err != nil { - return LayerMeta{}, err + var desc distribution.Descriptor + if _, err := redis.Scan(reply, &desc.Digest, &desc.Length, &desc.MediaType); err != nil { + return distribution.Descriptor{}, err } - return meta, nil + return desc, nil } -// SetMeta sets the meta data for the given digest using a redis hash. A hash -// is used here since we may store unrelated fields about a layer in the -// future. -func (rlic *redisLayerInfoCache) SetMeta(ctx context.Context, dgst digest.Digest, meta LayerMeta) error { - conn := rlic.pool.Get() +// SetDescriptor sets the descriptor data for the given digest using a redis +// hash. A hash is used here since we may store unrelated fields about a layer +// in the future. 
+func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := validateDigest(dgst); err != nil { + return err + } + + if err := validateDescriptor(desc); err != nil { + return err + } + + conn := rbds.pool.Get() defer conn.Close() - _, err := conn.Do("HMSET", rlic.blobMetaHashKey(dgst), "path", meta.Path, "length", meta.Length) - return err + return rbds.setDescriptor(ctx, conn, dgst, desc) } -// repositoryBlobSetKey returns the key for the blob set in the cache. -func (rlic *redisLayerInfoCache) repositoryBlobSetKey(repo string) string { - return "repository::" + repo + "::blobs" +func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { + if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), + "digest", desc.Digest, + "length", desc.Length); err != nil { + return err + } + + // Only set mediatype if not already set. + if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), + "mediatype", desc.MediaType); err != nil { + return err + } + + return nil } -// blobMetaHashKey returns the cache key for immutable blob meta data. -func (rlic *redisLayerInfoCache) blobMetaHashKey(dgst digest.Digest) string { +func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { return "blobs::" + dgst.String() } + +type repositoryScopedRedisBlobDescriptorService struct { + repo string + upstream *redisBlobDescriptorService +} + +var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} + +// Stat ensures that the digest is a member of the specified repository and +// forwards the descriptor request to the global blob store. If the media type +// differs for the repository, we override it. +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := validateDigest(dgst); err != nil { + return distribution.Descriptor{}, err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return distribution.Descriptor{}, err + } + + if !member { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + // We allow a per repository mediatype, let's look it up here. 
+	mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	if mediatype != "" {
+		upstream.MediaType = mediatype
+	}
+
+	return upstream, nil
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	if err := validateDigest(dgst); err != nil {
+		return err
+	}
+
+	if err := validateDescriptor(desc); err != nil {
+		return err
+	}
+
+	if dgst != desc.Digest {
+		if dgst.Algorithm() == desc.Digest.Algorithm() {
+			return fmt.Errorf("redis cache: digests for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest)
+		}
+	}
+
+	conn := rsrbds.upstream.pool.Get()
+	defer conn.Close()
+
+	return rsrbds.setDescriptor(ctx, conn, dgst, desc)
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
+	if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil {
+		return err
+	}
+
+	if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil {
+		return err
+	}
+
+	// Override repository mediatype.
+	if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil {
+		return err
+	}
+
+	// Also set the values for the primary descriptor, if they differ by
+	// algorithm (i.e. sha256 vs tarsum).
+	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
+		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
+	return "repository::" + rsrbds.repo + "::blobs::" + dgst.String()
+}
+
+func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string {
+	return "repository::" + rsrbds.repo + "::blobs"
+}
diff --git a/docs/storage/cache/redis_test.go b/docs/storage/cache/redis_test.go
index 7422a7eb..65c2fd3a 100644
--- a/docs/storage/cache/redis_test.go
+++ b/docs/storage/cache/redis_test.go
@@ -17,7 +17,7 @@ func init() {

 // TestRedisLayerInfoCache exercises a live redis instance using the cache
 // implementation.
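The key layout at a glance: a membership set plus two descriptor hashes, global and per-repository, with HSETNX keeping the global mediatype first-write-wins while HSET lets each repository override it. A hedged redigo sketch of the write path, as if inside this package; the renamed test below runs the real implementation against a live redis:

    // Hedged sketch of the key layout used above; conn is a redigo redis.Conn.
    func writeDescriptor(conn redis.Conn, repo string, dgst digest.Digest, desc distribution.Descriptor) error {
    	// Repository membership set: repository::<repo>::blobs
    	if _, err := conn.Do("SADD", "repository::"+repo+"::blobs", dgst); err != nil {
    		return err
    	}

    	// Global descriptor hash: blobs::<digest>
    	if _, err := conn.Do("HMSET", "blobs::"+dgst.String(),
    		"digest", desc.Digest, "length", desc.Length); err != nil {
    		return err
    	}

    	// Global mediatype is first-write-wins (HSETNX); the per-repository
    	// hash (repository::<repo>::blobs::<digest>) may override it via HSET.
    	_, err := conn.Do("HSETNX", "blobs::"+dgst.String(), "mediatype", desc.MediaType)
    	return err
    }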
-func TestRedisLayerInfoCache(t *testing.T) { +func TestRedisBlobDescriptorCacheProvider(t *testing.T) { if redisAddr == "" { // fallback to an environement variable redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") @@ -46,5 +46,5 @@ func TestRedisLayerInfoCache(t *testing.T) { t.Fatalf("unexpected error flushing redis db: %v", err) } - checkLayerInfoCache(t, NewRedisLayerInfoCache(pool)) + checkBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } diff --git a/docs/storage/cachedblobdescriptorstore.go b/docs/storage/cachedblobdescriptorstore.go new file mode 100644 index 00000000..a0ccd067 --- /dev/null +++ b/docs/storage/cachedblobdescriptorstore.go @@ -0,0 +1,84 @@ +package storage + +import ( + "expvar" + "sync/atomic" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobStatter +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + atomic.AddUint64(&blobStatterCacheMetrics.Stat.Requests, 1) + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + atomic.AddUint64(&blobStatterCacheMetrics.Stat.Hits, 1) + return desc, nil +fallback: + atomic.AddUint64(&blobStatterCacheMetrics.Stat.Misses, 1) + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err +} + +// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor +// cache requests. Note this is kept globally and made available via expvar. +// For more detailed metrics, its recommend to instrument a particular cache +// implementation. +var blobStatterCacheMetrics struct { + // Stat tracks calls to the caches. + Stat struct { + Requests uint64 + Hits uint64 + Misses uint64 + } +} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. + return blobStatterCacheMetrics + })) +} diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go index 72d58f8a..b3a5f520 100644 --- a/docs/storage/filereader.go +++ b/docs/storage/filereader.go @@ -7,7 +7,6 @@ import ( "io" "io/ioutil" "os" - "time" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" @@ -29,9 +28,8 @@ type fileReader struct { ctx context.Context // identifying fields - path string - size int64 // size is the total size, must be set. 
- modtime time.Time // TODO(stevvooe): This is not needed anymore. + path string + size int64 // size is the total size, must be set. // mutable fields rc io.ReadCloser // remote read closer @@ -40,41 +38,17 @@ type fileReader struct { err error // terminal error, if set, reader is closed } -// newFileReader initializes a file reader for the remote file. The read takes -// on the offset and size at the time the reader is created. If the underlying -// file changes, one must create a new fileReader. -func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string) (*fileReader, error) { - rd := &fileReader{ +// newFileReader initializes a file reader for the remote file. The reader +// takes on the size and path that must be determined externally with a stat +// call. The reader operates optimistically, assuming that the file is already +// there. +func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { + return &fileReader{ + ctx: ctx, driver: driver, path: path, - ctx: ctx, - } - - // Grab the size of the layer file, ensuring existence. - if fi, err := driver.Stat(ctx, path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // NOTE(stevvooe): We really don't care if the file is not - // actually present for the reader. If the caller needs to know - // whether or not the file exists, they should issue a stat call - // on the path. There is still no guarantee, since the file may be - // gone by the time the reader is created. The only correct - // behavior is to return a reader that immediately returns EOF. - default: - // Any other error we want propagated up the stack. - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot read a directory") - } - - // Fill in file information - rd.size = fi.Size() - rd.modtime = fi.ModTime() - } - - return rd, nil + size: size, + }, nil } func (fr *fileReader) Read(p []byte) (n int, err error) { @@ -162,11 +136,6 @@ func (fr *fileReader) reader() (io.Reader, error) { fr.rc = rc if fr.brd == nil { - // TODO(stevvooe): Set an optimal buffer size here. We'll have to - // understand the latency characteristics of the underlying network to - // set this correctly, so we may want to leave it to the driver. For - // out of process drivers, we'll have to optimize this buffer size for - // local communication. fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) } else { fr.brd.Reset(fr.rc) diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go index c48bf16d..774a864b 100644 --- a/docs/storage/filereader_test.go +++ b/docs/storage/filereader_test.go @@ -37,7 +37,7 @@ func TestSimpleRead(t *testing.T) { t.Fatalf("error putting patterned content: %v", err) } - fr, err := newFileReader(ctx, driver, path) + fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("error allocating file reader: %v", err) } @@ -66,7 +66,7 @@ func TestFileReaderSeek(t *testing.T) { t.Fatalf("error putting patterned content: %v", err) } - fr, err := newFileReader(ctx, driver, path) + fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("unexpected error creating file reader: %v", err) @@ -162,7 +162,7 @@ func TestFileReaderSeek(t *testing.T) { // read method, with an io.EOF error. 
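With the size now supplied by the caller, the usual sequence becomes stat-then-read, and a missing file simply yields an immediate io.EOF, as the test below verifies. A hedged sketch of the calling convention, assuming the storage package's own imports:

    // Hedged sketch: the caller stats first and passes the size through.
    func openBlobReader(ctx context.Context, driver storagedriver.StorageDriver, path string) (*fileReader, error) {
    	fi, err := driver.Stat(ctx, path)
    	if err != nil {
    		return nil, err // caller decides how to map PathNotFoundError
    	}

    	if fi.IsDir() {
    		return nil, fmt.Errorf("%q is a directory, not a blob", path)
    	}

    	return newFileReader(ctx, driver, path, fi.Size())
    }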
func TestFileReaderNonExistentFile(t *testing.T) { driver := inmemory.New() - fr, err := newFileReader(context.Background(), driver, "/doesnotexist") + fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) if err != nil { t.Fatalf("unexpected error initializing reader: %v", err) } diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go index 95930f1d..529fa673 100644 --- a/docs/storage/filewriter.go +++ b/docs/storage/filewriter.go @@ -39,7 +39,6 @@ type bufferedFileWriter struct { // filewriter should implement. type fileWriterInterface interface { io.WriteSeeker - io.WriterAt io.ReaderFrom io.Closer } @@ -110,21 +109,31 @@ func (bfw *bufferedFileWriter) Flush() error { // Write writes the buffer p at the current write offset. func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.readFromAt(bytes.NewReader(p), -1) - return int(nn), err -} - -// WriteAt writes p at the specified offset. The underlying offset does not -// change. -func (fw *fileWriter) WriteAt(p []byte, offset int64) (n int, err error) { - nn, err := fw.readFromAt(bytes.NewReader(p), offset) + nn, err := fw.ReadFrom(bytes.NewReader(p)) return int(nn), err } // ReadFrom reads reader r until io.EOF writing the contents at the current // offset. func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { - return fw.readFromAt(r, -1) + if fw.err != nil { + return 0, fw.err + } + + nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) + + // We should forward the offset, whether or not there was an error. + // Basically, we keep the filewriter in sync with the reader's head. If an + // error is encountered, the whole thing should be retried but we proceed + // from an expected offset, even if the data didn't make it to the + // backend. + fw.offset += nn + + if fw.offset > fw.size { + fw.size = fw.offset + } + + return nn, err } // Seek moves the write position do the requested offest based on the whence @@ -169,34 +178,3 @@ func (fw *fileWriter) Close() error { return nil } - -// readFromAt writes to fw from r at the specified offset. If offset is less -// than zero, the value of fw.offset is used and updated after the operation. -func (fw *fileWriter) readFromAt(r io.Reader, offset int64) (n int64, err error) { - if fw.err != nil { - return 0, fw.err - } - - var updateOffset bool - if offset < 0 { - offset = fw.offset - updateOffset = true - } - - nn, err := fw.driver.WriteStream(fw.ctx, fw.path, offset, r) - - if updateOffset { - // We should forward the offset, whether or not there was an error. - // Basically, we keep the filewriter in sync with the reader's head. If an - // error is encountered, the whole thing should be retried but we proceed - // from an expected offset, even if the data didn't make it to the - // backend. 
- fw.offset += nn - - if fw.offset > fw.size { - fw.size = fw.offset - } - } - - return nn, err -} diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go index 720e9385..858b0327 100644 --- a/docs/storage/filewriter_test.go +++ b/docs/storage/filewriter_test.go @@ -51,7 +51,7 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unexpected write length: %d != %d", n, len(content)) } - fr, err := newFileReader(ctx, driver, path) + fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } @@ -78,23 +78,23 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("write did not advance offset: %d != %d", end, len(content)) } - // Double the content, but use the WriteAt method + // Double the content doubled := append(content, content...) doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) if err != nil { t.Fatalf("unexpected error digesting doubled content: %v", err) } - n, err = fw.WriteAt(content, end) + nn, err := fw.ReadFrom(bytes.NewReader(content)) if err != nil { - t.Fatalf("unexpected error writing content at %d: %v", end, err) + t.Fatalf("unexpected error doubling content: %v", err) } - if n != len(content) { + if nn != int64(len(content)) { t.Fatalf("writeat was short: %d != %d", n, len(content)) } - fr, err = newFileReader(ctx, driver, path) + fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } @@ -111,20 +111,20 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unable to verify write data") } - // Check that WriteAt didn't update the offset. + // Check that Write updated the offset. end, err = fw.Seek(0, os.SEEK_END) if err != nil { t.Fatalf("unexpected error seeking: %v", err) } - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) + if end != int64(len(doubled)) { + t.Fatalf("write did not advance offset: %d != %d", end, len(doubled)) } // Now, we copy from one path to another, running the data through the // fileReader to fileWriter, rather than the driver.Move command to ensure // everything is working correctly. - fr, err = newFileReader(ctx, driver, path) + fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } @@ -136,7 +136,7 @@ func TestSimpleWrite(t *testing.T) { } defer fw.Close() - nn, err := io.Copy(fw, fr) + nn, err = io.Copy(fw, fr) if err != nil { t.Fatalf("unexpected error copying data: %v", err) } @@ -145,7 +145,7 @@ func TestSimpleWrite(t *testing.T) { t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) } - fr, err = newFileReader(ctx, driver, "/copied") + fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } diff --git a/docs/storage/layercache.go b/docs/storage/layercache.go deleted file mode 100644 index b9732f20..00000000 --- a/docs/storage/layercache.go +++ /dev/null @@ -1,202 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - "time" - - "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" -) - -// cachedLayerService implements the layer service with path-aware caching, -// using a LayerInfoCache interface. 
-type cachedLayerService struct { - distribution.LayerService // upstream layer service - repository distribution.Repository - ctx context.Context - driver driver.StorageDriver - *blobStore // global blob store - cache cache.LayerInfoCache -} - -// Exists checks for existence of the digest in the cache, immediately -// returning if it exists for the repository. If not, the upstream is checked. -// When a positive result is found, it is written into the cache. -func (lc *cachedLayerService) Exists(dgst digest.Digest) (bool, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*cachedLayerService).Exists(%q)", dgst) - now := time.Now() - defer func() { - // TODO(stevvooe): Replace this with a decent context-based metrics solution - ctxu.GetLoggerWithField(lc.ctx, "blob.exists.duration", time.Since(now)). - Infof("(*cachedLayerService).Exists(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Hits, 1) - return true, nil - } - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Exists.Misses, 1) - exists, err := lc.LayerService.Exists(dgst) - if err != nil { - return exists, err - } - - if exists { - // we can only cache this if the existence is positive. - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return exists, err -} - -// Fetch checks for the availability of the layer in the repository via the -// cache. If present, the metadata is resolved and the layer is returned. If -// any operation fails, the layer is read directly from the upstream. The -// results are cached, if possible. -func (lc *cachedLayerService) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Fetch(%q)", dgst) - now := time.Now() - defer func() { - ctxu.GetLoggerWithField(lc.ctx, "blob.fetch.duration", time.Since(now)). - Infof("(*layerInfoCache).Fetch(%q)", dgst) - }() - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Requests, 1) - available, err := lc.cache.Contains(lc.ctx, lc.repository.Name(), dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error checking availability of %v@%v: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - if available { - // fast path: get the layer info and return - meta, err := lc.cache.Meta(lc.ctx, dgst) - if err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error fetching %v@%v from cache: %v", lc.repository.Name(), dgst, err) - goto fallback - } - - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Hits, 1) - return newLayerReader(lc.driver, dgst, meta.Path, meta.Length) - } - - // NOTE(stevvooe): Unfortunately, the cache here only makes checks for - // existing layers faster. We'd have to provide more careful - // synchronization with the backend to make the missing case as fast. - -fallback: - atomic.AddUint64(&layerInfoCacheMetrics.Fetch.Misses, 1) - layer, err := lc.LayerService.Fetch(dgst) - if err != nil { - return nil, err - } - - // add the layer to the repository - if err := lc.cache.Add(lc.ctx, lc.repository.Name(), dgst); err != nil { - ctxu.GetLogger(lc.ctx). 
- Errorf("error caching repository relationship for %v@%v: %v", lc.repository.Name(), dgst, err) - } - - // lookup layer path and add it to the cache, if it succeds. Note that we - // still return the layer even if we have trouble caching it. - if path, err := lc.resolveLayerPath(layer); err != nil { - ctxu.GetLogger(lc.ctx). - Errorf("error resolving path while caching %v@%v: %v", lc.repository.Name(), dgst, err) - } else { - // add the layer to the cache once we've resolved the path. - if err := lc.cache.SetMeta(lc.ctx, dgst, cache.LayerMeta{Path: path, Length: layer.Length()}); err != nil { - ctxu.GetLogger(lc.ctx).Errorf("error adding meta for %v@%v to cache: %v", lc.repository.Name(), dgst, err) - } - } - - return layer, err -} - -// extractLayerInfo pulls the layerInfo from the layer, attempting to get the -// path information from either the concrete object or by resolving the -// primary blob store path. -func (lc *cachedLayerService) resolveLayerPath(layer distribution.Layer) (path string, err error) { - // try and resolve the type and driver, so we don't have to traverse links - switch v := layer.(type) { - case *layerReader: - // only set path if we have same driver instance. - if v.driver == lc.driver { - return v.path, nil - } - } - - ctxu.GetLogger(lc.ctx).Warnf("resolving layer path during cache lookup (%v@%v)", lc.repository.Name(), layer.Digest()) - // we have to do an expensive stat to resolve the layer location but no - // need to check the link, since we already have layer instance for this - // repository. - bp, err := lc.blobStore.path(layer.Digest()) - if err != nil { - return "", err - } - - return bp, nil -} - -// layerInfoCacheMetrics keeps track of cache metrics for layer info cache -// requests. Note this is kept globally and made available via expvar. For -// more detailed metrics, its recommend to instrument a particular cache -// implementation. -var layerInfoCacheMetrics struct { - // Exists tracks calls to the Exists caches. - Exists struct { - Requests uint64 - Hits uint64 - Misses uint64 - } - - // Fetch tracks calls to the fetch caches. - Fetch struct { - Requests uint64 - Hits uint64 - Misses uint64 - } -} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("layerinfo", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly. - return layerInfoCacheMetrics - })) -} diff --git a/docs/storage/layerreader.go b/docs/storage/layerreader.go deleted file mode 100644 index 044dab09..00000000 --- a/docs/storage/layerreader.go +++ /dev/null @@ -1,104 +0,0 @@ -package storage - -import ( - "fmt" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/driver" -) - -// layerReader implements Layer and provides facilities for reading and -// seeking. 
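The init function being deleted above is a reusable expvar idiom: look up (or create) nested expvar.Map values, then publish a live snapshot through expvar.Func so readers never block writers. A compressed sketch of the same pattern, with illustrative metric names:

    package main

    import (
    	"expvar"
    	"fmt"
    	"sync/atomic"
    )

    var fetchHits uint64 // bumped atomically by request paths

    func main() {
    	// Reuse the top-level map if some other package already exported it;
    	// expvar.NewMap panics on duplicate names.
    	registry, _ := expvar.Get("registry").(*expvar.Map)
    	if registry == nil {
    		registry = expvar.NewMap("registry")
    	}

    	// Hang a nested map off of it.
    	cache := new(expvar.Map).Init()
    	registry.Set("cache", cache)

    	// expvar.Func is evaluated at read time, so the atomic counter is
    	// the only synchronization needed; slightly stale reads are fine
    	// for a monitoring endpoint.
    	cache.Set("layerinfo", expvar.Func(func() interface{} {
    		return map[string]uint64{"fetch.hits": atomic.LoadUint64(&fetchHits)}
    	}))

    	atomic.AddUint64(&fetchHits, 1)
    	fmt.Println(registry.String()) // {"cache": {"layerinfo": {"fetch.hits":1}}}
    }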
-type layerReader struct { - fileReader - - digest digest.Digest -} - -// newLayerReader returns a new layerReader with the digest, path and length, -// eliding round trips to the storage backend. -func newLayerReader(driver driver.StorageDriver, dgst digest.Digest, path string, length int64) (*layerReader, error) { - fr := &fileReader{ - driver: driver, - path: path, - size: length, - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -var _ distribution.Layer = &layerReader{} - -func (lr *layerReader) Digest() digest.Digest { - return lr.digest -} - -func (lr *layerReader) Length() int64 { - return lr.size -} - -func (lr *layerReader) CreatedAt() time.Time { - return lr.modtime -} - -// Close the layer. Should be called when the resource is no longer needed. -func (lr *layerReader) Close() error { - return lr.closeWithErr(distribution.ErrLayerClosed) -} - -func (lr *layerReader) Handler(r *http.Request) (h http.Handler, err error) { - var handlerFunc http.HandlerFunc - - redirectURL, err := lr.fileReader.driver.URLFor(lr.ctx, lr.path, map[string]interface{}{"method": r.Method}) - - switch err { - case nil: - handlerFunc = func(w http.ResponseWriter, r *http.Request) { - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) - } - case driver.ErrUnsupportedMethod: - handlerFunc = func(w http.ResponseWriter, r *http.Request) { - // Fallback to serving the content directly. - http.ServeContent(w, r, lr.digest.String(), lr.CreatedAt(), lr) - } - default: - // Some unexpected error. - return nil, err - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the registry is serving this content itself, check - // the If-None-Match header and return 304 on match. Redirected - // storage implementations do the same. - - if etagMatch(r, lr.digest.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - setCacheHeaders(w, 86400, lr.digest.String()) - w.Header().Set("Docker-Content-Digest", lr.digest.String()) - handlerFunc.ServeHTTP(w, r) - }), nil -} - -func etagMatch(r *http.Request, etag string) bool { - for _, headerVal := range r.Header["If-None-Match"] { - if headerVal == etag { - return true - } - } - return false -} - -func setCacheHeaders(w http.ResponseWriter, cacheAge int, etag string) { - w.Header().Set("ETag", etag) - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d", cacheAge)) - -} diff --git a/docs/storage/layerstore.go b/docs/storage/layerstore.go deleted file mode 100644 index 8da14ac7..00000000 --- a/docs/storage/layerstore.go +++ /dev/null @@ -1,178 +0,0 @@ -package storage - -import ( - "time" - - "code.google.com/p/go-uuid/uuid" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -type layerStore struct { - repository *repository -} - -func (ls *layerStore) Exists(digest digest.Digest) (bool, error) { - context.GetLogger(ls.repository.ctx).Debug("(*layerStore).Exists") - - // Because this implementation just follows blob links, an existence check - // is pretty cheap by starting and closing a fetch. 
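The Handler method above composes three behaviors worth seeing in isolation: an If-None-Match short-circuit, cache headers, and a choice between redirecting to a signed backend URL and serving the bytes directly. A rough sketch under those assumptions, with a hypothetical urlFor standing in for the driver's URLFor (and simplified so that any URL error falls back to direct serving, whereas the original falls back only on ErrUnsupportedMethod):

    package main

    import (
    	"errors"
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    	"strings"
    	"time"
    )

    var errUnsupportedMethod = errors.New("unsupported method")

    // urlFor stands in for driver.URLFor: a cloud driver returns a signed
    // URL, while a filesystem driver reports redirection as unsupported.
    func urlFor(path string) (string, error) {
    	return "", errUnsupportedMethod
    }

    func blobHandler(etag, content string) http.Handler {
    	modtime := time.Now()
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		// The conditional GET check applies to both serving strategies.
    		for _, v := range r.Header["If-None-Match"] {
    			if v == etag {
    				w.WriteHeader(http.StatusNotModified)
    				return
    			}
    		}

    		w.Header().Set("ETag", etag)
    		w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d", 86400))

    		if u, err := urlFor("/blobs/" + etag); err == nil {
    			http.Redirect(w, r, u, http.StatusTemporaryRedirect)
    			return
    		}
    		// Redirect unavailable: serve the content directly.
    		http.ServeContent(w, r, etag, modtime, strings.NewReader(content))
    	})
    }

    func main() {
    	h := blobHandler("sha256:deadbeef", "layer bytes")

    	req := httptest.NewRequest("GET", "/blob", nil)
    	req.Header.Set("If-None-Match", "sha256:deadbeef")
    	rec := httptest.NewRecorder()
    	h.ServeHTTP(rec, req)
    	fmt.Println(rec.Code) // 304
    }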
- _, err := ls.Fetch(digest) - - if err != nil { - switch err.(type) { - case distribution.ErrUnknownLayer: - return false, nil - } - - return false, err - } - - return true, nil -} - -func (ls *layerStore) Fetch(dgst digest.Digest) (distribution.Layer, error) { - ctx := ls.repository.ctx - context.GetLogger(ctx).Debug("(*layerStore).Fetch") - bp, err := ls.path(dgst) - if err != nil { - return nil, err - } - - fr, err := newFileReader(ctx, ls.repository.driver, bp) - if err != nil { - return nil, err - } - - return &layerReader{ - fileReader: *fr, - digest: dgst, - }, nil -} - -// Upload begins a layer upload, returning a handle. If the layer upload -// is already in progress or the layer has already been uploaded, this -// will return an error. -func (ls *layerStore) Upload() (distribution.LayerUpload, error) { - ctx := ls.repository.ctx - context.GetLogger(ctx).Debug("(*layerStore).Upload") - - // NOTE(stevvooe): Consider the issues with allowing concurrent upload of - // the same two layers. Should it be disallowed? For now, we allow both - // parties to proceed and the the first one uploads the layer. - - uuid := uuid.New() - startedAt := time.Now().UTC() - - path, err := ls.repository.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - // Write a startedat file for this upload - if err := ls.repository.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// Resume continues an in progress layer upload, returning the current -// state of the upload. -func (ls *layerStore) Resume(uuid string) (distribution.LayerUpload, error) { - ctx := ls.repository.ctx - context.GetLogger(ctx).Debug("(*layerStore).Resume") - - startedAtPath, err := ls.repository.pm.path(uploadStartedAtPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - startedAtBytes, err := ls.repository.driver.GetContent(ctx, startedAtPath) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return nil, distribution.ErrLayerUploadUnknown - default: - return nil, err - } - } - - startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) - if err != nil { - return nil, err - } - - path, err := ls.repository.pm.path(uploadDataPathSpec{ - name: ls.repository.Name(), - uuid: uuid, - }) - - if err != nil { - return nil, err - } - - return ls.newLayerUpload(uuid, path, startedAt) -} - -// newLayerUpload allocates a new upload controller with the given state. -func (ls *layerStore) newLayerUpload(uuid, path string, startedAt time.Time) (distribution.LayerUpload, error) { - fw, err := newFileWriter(ls.repository.ctx, ls.repository.driver, path) - if err != nil { - return nil, err - } - - lw := &layerWriter{ - layerStore: ls, - uuid: uuid, - startedAt: startedAt, - bufferedFileWriter: *fw, - } - - lw.setupResumableDigester() - - return lw, nil -} - -func (ls *layerStore) path(dgst digest.Digest) (string, error) { - // We must traverse this path through the link to enforce ownership. 
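Upload and Resume above amount to a small session protocol: allocate an ID, persist a startedat timestamp as RFC3339, and refuse to resume sessions that have no record. A storage-free sketch of that bookkeeping; the uploadTracker type is illustrative and the map stands in for the storage driver:

    package main

    import (
    	"fmt"
    	"time"
    )

    // uploadTracker mirrors the bookkeeping above: each upload gets an ID
    // and a startedat timestamp persisted as RFC3339, so abandoned sessions
    // can be aged out later.
    type uploadTracker struct {
    	startedAt map[string]string // id -> RFC3339 timestamp
    }

    func (t *uploadTracker) start(id string) {
    	t.startedAt[id] = time.Now().UTC().Format(time.RFC3339)
    }

    // resume recovers the original start time, failing for unknown sessions
    // the way Resume maps a missing startedat file to ErrLayerUploadUnknown.
    func (t *uploadTracker) resume(id string) (time.Time, error) {
    	raw, ok := t.startedAt[id]
    	if !ok {
    		return time.Time{}, fmt.Errorf("layer upload unknown: %s", id)
    	}
    	return time.Parse(time.RFC3339, raw)
    }

    func main() {
    	t := &uploadTracker{startedAt: map[string]string{}}
    	t.start("asdf-asdf-asdf-adsf")

    	if when, err := t.resume("asdf-asdf-asdf-adsf"); err == nil {
    		fmt.Println("resumed upload started at", when)
    	}
    	_, err := t.resume("missing")
    	fmt.Println(err) // layer upload unknown: missing
    }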
- layerLinkPath, err := ls.repository.pm.path(layerLinkPathSpec{name: ls.repository.Name(), digest: dgst}) - if err != nil { - return "", err - } - - blobPath, err := ls.repository.blobStore.resolve(layerLinkPath) - - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return "", distribution.ErrUnknownLayer{ - FSLayer: manifest.FSLayer{BlobSum: dgst}, - } - default: - return "", err - } - } - - return blobPath, nil -} diff --git a/docs/storage/layerwriter.go b/docs/storage/layerwriter.go deleted file mode 100644 index a2672fe6..00000000 --- a/docs/storage/layerwriter.go +++ /dev/null @@ -1,478 +0,0 @@ -package storage - -import ( - "fmt" - "io" - "os" - "path" - "strconv" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -var _ distribution.LayerUpload = &layerWriter{} - -// layerWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. -type layerWriter struct { - layerStore *layerStore - - uuid string - startedAt time.Time - resumableDigester digest.ResumableDigester - - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy - // LayerUpload Interface - bufferedFileWriter -} - -var _ distribution.LayerUpload = &layerWriter{} - -// UUID returns the identifier for this upload. -func (lw *layerWriter) UUID() string { - return lw.uuid -} - -func (lw *layerWriter) StartedAt() time.Time { - return lw.startedAt -} - -// Finish marks the upload as completed, returning a valid handle to the -// uploaded layer. The final size and checksum are validated against the -// contents of the uploaded layer. The checksum should be provided in the -// format :. -func (lw *layerWriter) Finish(dgst digest.Digest) (distribution.Layer, error) { - context.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Finish") - - if err := lw.bufferedFileWriter.Close(); err != nil { - return nil, err - } - - var ( - canonical digest.Digest - err error - ) - - // HACK(stevvooe): To deal with s3's lack of consistency, attempt to retry - // validation on failure. Three attempts are made, backing off - // retries*100ms each time. - for retries := 0; ; retries++ { - canonical, err = lw.validateLayer(dgst) - if err == nil { - break - } - - context.GetLoggerWithField(lw.layerStore.repository.ctx, "retries", retries). - Errorf("error validating layer: %v", err) - - if retries < 3 { - time.Sleep(100 * time.Millisecond * time.Duration(retries+1)) - continue - } - - return nil, err - - } - - if err := lw.moveLayer(canonical); err != nil { - // TODO(stevvooe): Cleanup? - return nil, err - } - - // Link the layer blob into the repository. - if err := lw.linkLayer(canonical, dgst); err != nil { - return nil, err - } - - if err := lw.removeResources(); err != nil { - return nil, err - } - - return lw.layerStore.Fetch(canonical) -} - -// Cancel the layer upload process. -func (lw *layerWriter) Cancel() error { - context.GetLogger(lw.layerStore.repository.ctx).Debug("(*layerWriter).Cancel") - if err := lw.removeResources(); err != nil { - return err - } - - lw.Close() - return nil -} - -func (lw *layerWriter) Write(p []byte) (int, error) { - if lw.resumableDigester == nil { - return lw.bufferedFileWriter.Write(p) - } - - // Ensure that the current write offset matches how many bytes have been - // written to the digester. 
If not, we need to update the digest state to - // match the current write position. - if err := lw.resumeHashAt(lw.offset); err != nil { - return 0, err - } - - return io.MultiWriter(&lw.bufferedFileWriter, lw.resumableDigester).Write(p) -} - -func (lw *layerWriter) ReadFrom(r io.Reader) (n int64, err error) { - if lw.resumableDigester == nil { - return lw.bufferedFileWriter.ReadFrom(r) - } - - // Ensure that the current write offset matches how many bytes have been - // written to the digester. If not, we need to update the digest state to - // match the current write position. - if err := lw.resumeHashAt(lw.offset); err != nil { - return 0, err - } - - return lw.bufferedFileWriter.ReadFrom(io.TeeReader(r, lw.resumableDigester)) -} - -func (lw *layerWriter) Close() error { - if lw.err != nil { - return lw.err - } - - if lw.resumableDigester != nil { - if err := lw.storeHashState(); err != nil { - return err - } - } - - return lw.bufferedFileWriter.Close() -} - -type hashStateEntry struct { - offset int64 - path string -} - -// getStoredHashStates returns a slice of hashStateEntries for this upload. -func (lw *layerWriter) getStoredHashStates() ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - alg: lw.resumableDigester.Digest().Algorithm(), - list: true, - }) - if err != nil { - return nil, err - } - - paths, err := lw.driver.List(lw.layerStore.repository.ctx, uploadHashStatePathPrefix) - if err != nil { - if _, ok := err.(storagedriver.PathNotFoundError); !ok { - return nil, err - } - // Treat PathNotFoundError as no entries. - paths = nil - } - - hashStateEntries := make([]hashStateEntry, 0, len(paths)) - - for _, p := range paths { - pathSuffix := path.Base(p) - // The suffix should be the offset. - offset, err := strconv.ParseInt(pathSuffix, 0, 64) - if err != nil { - logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) - } - - hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) - } - - return hashStateEntries, nil -} - -// resumeHashAt attempts to restore the state of the internal hash function -// by loading the most recent saved hash state less than or equal to the given -// offset. Any unhashed bytes remaining less than the given offset are hashed -// from the content uploaded so far. -func (lw *layerWriter) resumeHashAt(offset int64) error { - if offset < 0 { - return fmt.Errorf("cannot resume hash at negative offset: %d", offset) - } - - if offset == int64(lw.resumableDigester.Len()) { - // State of digester is already at the requested offset. - return nil - } - - // List hash states from storage backend. - var hashStateMatch hashStateEntry - hashStates, err := lw.getStoredHashStates() - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - ctx := lw.layerStore.repository.ctx - // Find the highest stored hashState with offset less than or equal to - // the requested offset. - for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { - // This offset is closer to the requested offset. 
- hashStateMatch = hashState - } else if hashState.offset > offset { - // Remove any stored hash state with offsets higher than this one - // as writes to this resumed hasher will make those invalid. This - // is probably okay to skip for now since we don't expect anyone to - // use the API in this way. For that reason, we don't treat an - // an error here as a fatal error, but only log it. - if err := lw.driver.Delete(ctx, hashState.path); err != nil { - logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) - } - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - lw.resumableDigester.Reset() - } else { - storedState, err := lw.driver.GetContent(ctx, hashStateMatch.path) - if err != nil { - return err - } - - if err = lw.resumableDigester.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(lw.resumableDigester.Len()); gapLen > 0 { - // Need to read content from the upload to catch up to the desired offset. - fr, err := newFileReader(ctx, lw.driver, lw.path) - if err != nil { - return err - } - - if _, err = fr.Seek(int64(lw.resumableDigester.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", lw.resumableDigester.Len(), err) - } - - if _, err := io.CopyN(lw.resumableDigester, fr, gapLen); err != nil { - return err - } - } - - return nil -} - -func (lw *layerWriter) storeHashState() error { - uploadHashStatePath, err := lw.layerStore.repository.pm.path(uploadHashStatePathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - alg: lw.resumableDigester.Digest().Algorithm(), - offset: int64(lw.resumableDigester.Len()), - }) - if err != nil { - return err - } - - hashState, err := lw.resumableDigester.State() - if err != nil { - return err - } - - return lw.driver.PutContent(lw.layerStore.repository.ctx, uploadHashStatePath, hashState) -} - -// validateLayer checks the layer data against the digest, returning an error -// if it does not match. The canonical digest is returned. -func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) { - var ( - verified, fullHash bool - canonical digest.Digest - ) - - if lw.resumableDigester != nil { - // Restore the hasher state to the end of the upload. - if err := lw.resumeHashAt(lw.size); err != nil { - return "", err - } - - canonical = lw.resumableDigester.Digest() - - if canonical.Algorithm() == dgst.Algorithm() { - // Common case: client and server prefer the same canonical digest - // algorithm - currently SHA256. - verified = dgst == canonical - } else { - // The client wants to use a different digest algorithm. They'll just - // have to be patient and wait for us to download and re-hash the - // uploaded content using that digest algorithm. - fullHash = true - } - } else { - // Not using resumable digests, so we need to hash the entire layer. - fullHash = true - } - - if fullHash { - digester := digest.NewCanonicalDigester() - - digestVerifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return "", err - } - - // Read the file from the backend driver and validate it. 
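The "mind the gap" step above is the heart of resumable digesting: when the saved hash state lags the requested offset, the missing bytes are read back from the upload and replayed into the hasher. A sketch of just that catch-up using a plain stdlib hash; state save/restore is elided, since crypto/sha256 does not expose it:

    package main

    import (
    	"bytes"
    	"crypto/sha256"
    	"fmt"
    	"hash"
    	"io"
    )

    // catchUp hashes the bytes between what the digester has already
    // consumed (hashed) and the requested offset, replaying them from the
    // stored upload content.
    func catchUp(h hash.Hash, upload io.ReadSeeker, hashed, offset int64) error {
    	if gap := offset - hashed; gap > 0 {
    		if _, err := upload.Seek(hashed, io.SeekStart); err != nil {
    			return err
    		}
    		if _, err := io.CopyN(h, upload, gap); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    func main() {
    	content := []byte("0123456789")
    	h := sha256.New()
    	h.Write(content[:4]) // digester paused at offset 4

    	// Resume writing at offset 10: first replay bytes 4 through 9.
    	if err := catchUp(h, bytes.NewReader(content), 4, int64(len(content))); err != nil {
    		panic(err)
    	}

    	fmt.Printf("%x\n", h.Sum(nil))
    	fmt.Printf("%x\n", sha256.Sum256(content)) // identical digests
    }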
- fr, err := newFileReader(lw.layerStore.repository.ctx, lw.bufferedFileWriter.driver, lw.path) - if err != nil { - return "", err - } - - tr := io.TeeReader(fr, digester) - - if _, err = io.Copy(digestVerifier, tr); err != nil { - return "", err - } - - canonical = digester.Digest() - verified = digestVerifier.Verified() - } - - if !verified { - context.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst). - Errorf("canonical digest does match provided digest") - return "", distribution.ErrLayerInvalidDigest{ - Digest: dgst, - Reason: fmt.Errorf("content does not match digest"), - } - } - - return canonical, nil -} - -// moveLayer moves the data into its final, hash-qualified destination, -// identified by dgst. The layer should be validated before commencing the -// move. -func (lw *layerWriter) moveLayer(dgst digest.Digest) error { - blobPath, err := lw.layerStore.repository.pm.path(blobDataPathSpec{ - digest: dgst, - }) - - if err != nil { - return err - } - - ctx := lw.layerStore.repository.ctx - // Check for existence - if _, err := lw.driver.Stat(ctx, blobPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // ensure that it doesn't exist. - default: - return err - } - } else { - // If the path exists, we can assume that the content has already - // been uploaded, since the blob storage is content-addressable. - // While it may be corrupted, detection of such corruption belongs - // elsewhere. - return nil - } - - // If no data was received, we may not actually have a file on disk. Check - // the size here and write a zero-length file to blobPath if this is the - // case. For the most part, this should only ever happen with zero-length - // tars. - if _, err := lw.driver.Stat(ctx, lw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // HACK(stevvooe): This is slightly dangerous: if we verify above, - // get a hash, then the underlying file is deleted, we risk moving - // a zero-length blob into a nonzero-length blob location. To - // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the zero tarsum. - if dgst == digest.DigestSha256EmptyTar { - return lw.driver.PutContent(ctx, blobPath, []byte{}) - } - - // We let this fail during the move below. - logrus. - WithField("upload.uuid", lw.UUID()). - WithField("digest", dgst).Warnf("attempted to move zero-length content with non-zero digest") - default: - return err // unrelated error - } - } - - return lw.driver.Move(ctx, lw.path, blobPath) -} - -// linkLayer links a valid, written layer blob into the registry under the -// named repository for the upload controller. -func (lw *layerWriter) linkLayer(canonical digest.Digest, aliases ...digest.Digest) error { - dgsts := append([]digest.Digest{canonical}, aliases...) - - // Don't make duplicate links. - seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) - - for _, dgst := range dgsts { - if _, seen := seenDigests[dgst]; seen { - continue - } - seenDigests[dgst] = struct{}{} - - layerLinkPath, err := lw.layerStore.repository.pm.path(layerLinkPathSpec{ - name: lw.layerStore.repository.Name(), - digest: dgst, - }) - - if err != nil { - return err - } - - ctx := lw.layerStore.repository.ctx - if err := lw.layerStore.repository.driver.PutContent(ctx, layerLinkPath, []byte(canonical)); err != nil { - return err - } - } - - return nil -} - -// removeResources should clean up all resources associated with the upload -// instance. 
An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned. -func (lw *layerWriter) removeResources() error { - dataPath, err := lw.layerStore.repository.pm.path(uploadDataPathSpec{ - name: lw.layerStore.repository.Name(), - uuid: lw.uuid, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload related files. - dirPath := path.Dir(dataPath) - if err := lw.driver.Delete(lw.layerStore.repository.ctx, dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became unaccessible. - logrus.Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} diff --git a/docs/storage/layerwriter_nonresumable.go b/docs/storage/layerwriter_nonresumable.go deleted file mode 100644 index d4350c6b..00000000 --- a/docs/storage/layerwriter_nonresumable.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build noresumabledigest - -package storage - -func (lw *layerWriter) setupResumableDigester() { -} diff --git a/docs/storage/layerwriter_resumable.go b/docs/storage/layerwriter_resumable.go deleted file mode 100644 index 7d8c6335..00000000 --- a/docs/storage/layerwriter_resumable.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !noresumabledigest - -package storage - -import "github.com/docker/distribution/digest" - -func (lw *layerWriter) setupResumableDigester() { - lw.resumableDigester = digest.NewCanonicalResumableDigester() -} diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go new file mode 100644 index 00000000..91dd0616 --- /dev/null +++ b/docs/storage/linkedblobstore.go @@ -0,0 +1,258 @@ +package storage + +import ( + "net/http" + "time" + + "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// linkedBlobStore provides a full BlobService that namespaces the blobs to a +// given repository. Effectively, it manages the links in a given repository +// that grant access to the global blob store. +type linkedBlobStore struct { + *blobStore + blobServer distribution.BlobServer + statter distribution.BlobStatter + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + + // linkPath allows one to control the repository blob link set to which + // the blob store dispatches. This is required because manifest and layer + // blobs have not yet been fully merged. At some point, this functionality + // should be removed an the blob links folder should be merged. 
+	linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error)
+}
+
+var _ distribution.BlobStore = &linkedBlobStore{}
+
+func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	return lbs.statter.Stat(ctx, dgst)
+}
+
+func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	canonical, err := lbs.Stat(ctx, dgst) // access check
+	if err != nil {
+		return nil, err
+	}
+
+	return lbs.blobStore.Get(ctx, canonical.Digest)
+}
+
+func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	canonical, err := lbs.Stat(ctx, dgst) // access check
+	if err != nil {
+		return nil, err
+	}
+
+	return lbs.blobStore.Open(ctx, canonical.Digest)
+}
+
+func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	canonical, err := lbs.Stat(ctx, dgst) // access check
+	if err != nil {
+		return err
+	}
+
+	if canonical.MediaType != "" {
+		// Set the repository local content type.
+		w.Header().Set("Content-Type", canonical.MediaType)
+	}
+
+	return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest)
+}
+
+func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	// Place the data in the blob store first.
+	desc, err := lbs.blobStore.Put(ctx, mediaType, p)
+	if err != nil {
+		context.GetLogger(ctx).Errorf("error putting into main store: %v", err)
+		return distribution.Descriptor{}, err
+	}
+
+	// TODO(stevvooe): Write out mediatype if incoming differs from what is
+	// returned by Put above. Note that we should allow updates for a given
+	// repository.
+
+	return desc, lbs.linkBlob(ctx, desc)
+}
+
+// Create begins a blob write session, returning a handle.
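The methods above all follow one decorator shape: run Stat as a per-repository access check, then delegate to the global store using the canonical digest it returned. A stripped-down sketch of that shape; the linkedStore type and string digests are illustrative:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errBlobUnknown = errors.New("blob unknown to repository")

    // store stands in for the global, content-addressed blob store.
    type store map[string][]byte

    // linkedStore gates access to the global store through a repository's
    // link set: get succeeds only if the digest is linked into the repo.
    type linkedStore struct {
    	links  map[string]bool // repo-local link set
    	global store
    }

    // stat is the access check: it consults only the repository's links.
    func (ls *linkedStore) stat(dgst string) (string, error) {
    	if !ls.links[dgst] {
    		return "", errBlobUnknown
    	}
    	return dgst, nil // canonical digest; could differ, e.g. tarsum -> sha256
    }

    func (ls *linkedStore) get(dgst string) ([]byte, error) {
    	canonical, err := ls.stat(dgst) // access check first
    	if err != nil {
    		return nil, err
    	}
    	return ls.global[canonical], nil
    }

    func main() {
    	global := store{"sha256:aaaa": []byte("blob")}
    	repoA := &linkedStore{links: map[string]bool{"sha256:aaaa": true}, global: global}
    	repoB := &linkedStore{links: map[string]bool{}, global: global}

    	fmt.Println(repoA.get("sha256:aaaa")) // [98 108 111 98] <nil>
    	fmt.Println(repoB.get("sha256:aaaa")) // [] blob unknown to repository
    }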
+func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + uuid := uuid.New() + startedAt := time.Now().UTC() + + path, err := lbs.blobStore.pm.path(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: uuid, + }) + + if err != nil { + return nil, err + } + + // Write a startedat file for this upload + if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, uuid, path, startedAt) +} + +func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + + startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil, distribution.ErrBlobUploadUnknown + default: + return nil, err + } + } + + startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) + if err != nil { + return nil, err + } + + path, err := lbs.pm.path(uploadDataPathSpec{ + name: lbs.repository.Name(), + id: id, + }) + + if err != nil { + return nil, err + } + + return lbs.newBlobUpload(ctx, id, path, startedAt) +} + +// newLayerUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { + fw, err := newFileWriter(ctx, lbs.driver, path) + if err != nil { + return nil, err + } + + bw := &blobWriter{ + blobStore: lbs, + id: uuid, + startedAt: startedAt, + bufferedFileWriter: *fw, + } + + bw.setupResumableDigester() + + return bw, nil +} + +// linkBlob links a valid, written blob into the registry under the named +// repository for the upload controller. +func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { + dgsts := append([]digest.Digest{canonical.Digest}, aliases...) + + // TODO(stevvooe): Need to write out mediatype for only canonical hash + // since we don't care about the aliases. They are generally unused except + // for tarsum but those versions don't care about mediatype. + + // Don't make duplicate links. + seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) + + for _, dgst := range dgsts { + if _, seen := seenDigests[dgst]; seen { + continue + } + seenDigests[dgst] = struct{}{} + + blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return err + } + + if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { + return err + } + } + + return nil +} + +type linkedBlobStatter struct { + *blobStore + repository distribution.Repository + + // linkPath allows one to control the repository blob link set to which + // the blob store dispatches. This is required because manifest and layer + // blobs have not yet been fully merged. At some point, this functionality + // should be removed an the blob links folder should be merged. 
+ linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error) +} + +var _ distribution.BlobStatter = &linkedBlobStatter{} + +func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + target, err := lbs.blobStore.readlink(ctx, blobLinkPath) + if err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return distribution.Descriptor{}, distribution.ErrBlobUnknown + default: + return distribution.Descriptor{}, err + } + + // TODO(stevvooe): For backwards compatibility with data in "_layers", we + // need to hit layerLinkPath, as well. Or, somehow migrate to the new path + // layout. + } + + if target != dgst { + // Track when we are doing cross-digest domain lookups. ie, tarsum to sha256. + context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + } + + // TODO(stevvooe): Look up repository local mediatype and replace that on + // the returned descriptor. + + return lbs.blobStore.statter.Stat(ctx, target) +} + +// blobLinkPath provides the path to the blob link, also known as layers. +func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +} + +// manifestRevisionLinkPath provides the path to the manifest revision link. +func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { + return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +} diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 4946785d..07f8de3c 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -4,88 +4,92 @@ import ( "fmt" "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) type manifestStore struct { - repository *repository - + repository *repository revisionStore *revisionStore tagStore *tagStore + ctx context.Context } var _ distribution.ManifestService = &manifestStore{} func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Exists") - return ms.revisionStore.exists(dgst) + context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + + _, err := ms.revisionStore.blobStore.Stat(ms.ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return false, nil + } + + return false, err + } + + return true, nil } func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(dgst) + context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + return ms.revisionStore.get(ms.ctx, dgst) } func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Put") - - // TODO(stevvooe): Add check here to see if the revision is already - // present in the repository. If it is, we should merge the signatures, do - // a shallow verify (or a full one, doesn't matter) and return an error - // indicating what happened. + context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") // Verify the manifest. 
- if err := ms.verifyManifest(manifest); err != nil { + if err := ms.verifyManifest(ms.ctx, manifest); err != nil { return err } // Store the revision of the manifest - revision, err := ms.revisionStore.put(manifest) + revision, err := ms.revisionStore.put(ms.ctx, manifest) if err != nil { return err } // Now, tag the manifest - return ms.tagStore.tag(manifest.Tag, revision) + return ms.tagStore.tag(manifest.Tag, revision.Digest) } // Delete removes the revision of the specified manfiest. func (ms *manifestStore) Delete(dgst digest.Digest) error { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Delete - unsupported") + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete - unsupported") return fmt.Errorf("deletion of manifests not supported") } func (ms *manifestStore) Tags() ([]string, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).Tags") + context.GetLogger(ms.ctx).Debug("(*manifestStore).Tags") return ms.tagStore.tags() } func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).ExistsByTag") + context.GetLogger(ms.ctx).Debug("(*manifestStore).ExistsByTag") return ms.tagStore.exists(tag) } func (ms *manifestStore) GetByTag(tag string) (*manifest.SignedManifest, error) { - ctxu.GetLogger(ms.repository.ctx).Debug("(*manifestStore).GetByTag") + context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag") dgst, err := ms.tagStore.resolve(tag) if err != nil { return nil, err } - return ms.revisionStore.get(dgst) + return ms.revisionStore.get(ms.ctx, dgst) } // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid // content, leaving trust policies of that content up to consumers. -func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { +func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *manifest.SignedManifest) error { var errs distribution.ErrManifestVerification if mnfst.Name != ms.repository.Name() { - // TODO(stevvooe): This needs to be an exported error errs = append(errs, fmt.Errorf("repository name does not match manifest name")) } @@ -103,18 +107,18 @@ func (ms *manifestStore) verifyManifest(mnfst *manifest.SignedManifest) error { } for _, fsLayer := range mnfst.FSLayers { - exists, err := ms.repository.Layers().Exists(fsLayer.BlobSum) + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) if err != nil { - errs = append(errs, err) - } + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } - if !exists { - errs = append(errs, distribution.ErrUnknownLayer{FSLayer: fsLayer}) + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) } } if len(errs) != 0 { - // TODO(stevvooe): These need to be recoverable by a caller. 
return errs } diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 3bafb997..59f174b3 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -6,16 +6,15 @@ import ( "reflect" "testing" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" - "golang.org/x/net/context" ) type manifestStoreTestEnv struct { @@ -30,7 +29,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache()) + registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) repo, err := registry.Repository(ctx, name) if err != nil { @@ -108,20 +107,33 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("expected errors putting manifest") } - // TODO(stevvooe): We expect errors describing all of the missing layers. + switch err := err.(type) { + case distribution.ErrManifestVerification: + if len(err) != 2 { + t.Fatalf("expected 2 verification errors: %#v", err) + } + + for _, err := range err { + if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { + t.Fatalf("unexpected error type: %v", err) + } + } + default: + t.Fatalf("unexpected error verifying manifest: %v", err) + } // Now, upload the layers that were missing! for dgst, rs := range testLayers { - upload, err := env.repository.Layers().Upload() + wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) if err != nil { t.Fatalf("unexpected error creating test upload: %v", err) } - if _, err := io.Copy(upload, rs); err != nil { + if _, err := io.Copy(wr, rs); err != nil { t.Fatalf("unexpected error copying to upload: %v", err) } - if _, err := upload.Finish(dgst); err != nil { + if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } } diff --git a/docs/storage/paths.go b/docs/storage/paths.go index fe648f51..9e150d3b 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -30,7 +30,7 @@ const storagePathVersion = "v2" // -> //link // -> _layers/ // -// -> _uploads/ +// -> _uploads/ // data // startedat // hashstates// @@ -47,7 +47,7 @@ const storagePathVersion = "v2" // is just a directory of layers which are "linked" into a repository. A layer // can only be accessed through a qualified repository name if it is linked in // the repository. Uploads of layers are managed in the uploads directory, -// which is key by upload uuid. When all data for an upload is received, the +// which is key by upload id. When all data for an upload is received, the // data is moved into the blob store and the upload directory is deleted. // Abandoned uploads can be garbage collected by reading the startedat file // and removing uploads that have been active for longer than a certain time. 
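The updated test above leans on ErrManifestVerification being a slice of errors that is itself an error, which makes the "collect every problem, then decide" verification style ergonomic for callers. A minimal sketch of that error-aggregation shape, with illustrative names:

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    )

    // verificationErrors mirrors the shape of ErrManifestVerification: a
    // slice of errors that satisfies the error interface itself.
    type verificationErrors []error

    func (ve verificationErrors) Error() string {
    	parts := make([]string, 0, len(ve))
    	for _, err := range ve {
    		parts = append(parts, err.Error())
    	}
    	return strings.Join(parts, "; ")
    }

    var errBlobUnknown = errors.New("blob unknown")

    // verify accumulates one error per missing blob instead of failing fast.
    func verify(blobs []string, known map[string]bool) error {
    	var errs verificationErrors
    	for _, b := range blobs {
    		if !known[b] {
    			errs = append(errs, fmt.Errorf("%s: %w", b, errBlobUnknown))
    		}
    	}
    	if len(errs) != 0 {
    		return errs
    	}
    	return nil
    }

    func main() {
    	err := verify([]string{"a", "b", "c"}, map[string]bool{"b": true})

    	// Callers type-switch, as the test does, to inspect each failure.
    	switch err := err.(type) {
    	case verificationErrors:
    		fmt.Println(len(err), "verification errors") // 2 verification errors
    	default:
    		fmt.Println("unexpected:", err)
    	}
    }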
@@ -80,20 +80,21 @@ const storagePathVersion = "v2" // manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// // manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link // -// Layers: +// Blobs: // -// layerLinkPathSpec: /v2/repositories//_layers/tarsum////link +// layerLinkPathSpec: /v2/repositories//_layers///link // // Uploads: // -// uploadDataPathSpec: /v2/repositories//_uploads//data -// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat -// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// +// uploadDataPathSpec: /v2/repositories//_uploads//data +// uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat +// uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// // // Blob Store: // // blobPathSpec: /v2/blobs/// // blobDataPathSpec: /v2/blobs////data +// blobMediaTypePathSpec: /v2/blobs////data // // For more information on the semantic meaning of each path and their // contents, please see the path spec documentation. @@ -234,9 +235,14 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return "", err } - layerLinkPathComponents := append(repoPrefix, v.name, "_layers") + // TODO(stevvooe): Right now, all blobs are linked under "_layers". If + // we have future migrations, we may want to rename this to "_blobs". + // A migration strategy would simply leave existing items in place and + // write the new paths, commit a file then delete the old files. - return path.Join(path.Join(append(layerLinkPathComponents, components...)...), "link"), nil + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil case blobDataPathSpec: components, err := digestPathComponents(v.digest, true) if err != nil { @@ -248,15 +254,15 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(append(blobPathPrefix, components...)...), nil case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "data")...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "startedat")...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil case uploadHashStatePathSpec: offset := fmt.Sprintf("%d", v.offset) if v.list { offset = "" // Limit to the prefix for listing offsets. } - return path.Join(append(repoPrefix, v.name, "_uploads", v.uuid, "hashstates", v.alg, offset)...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", v.alg, offset)...), nil case repositoriesRootPathSpec: return path.Join(repoPrefix...), nil default: @@ -367,8 +373,8 @@ type manifestTagIndexEntryLinkPathSpec struct { func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} -// layerLink specifies a path for a layer link, which is a file with a blob -// id. The layer link will contain a content addressable blob id reference +// blobLinkPathSpec specifies a path for a blob link, which is a file with a +// blob id. The blob link will contain a content addressable blob id reference // into the blob store. 
The format of the contents is as follows: // // : @@ -377,7 +383,7 @@ func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} // // sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 // -// This says indicates that there is a blob with the id/digest, calculated via +// This indicates that there is a blob with the id/digest, calculated via // sha256 that can be fetched from the blob store. type layerLinkPathSpec struct { name string @@ -415,7 +421,7 @@ func (blobDataPathSpec) pathSpec() {} // uploads. type uploadDataPathSpec struct { name string - uuid string + id string } func (uploadDataPathSpec) pathSpec() {} @@ -429,7 +435,7 @@ func (uploadDataPathSpec) pathSpec() {} // the client to enforce time out policies. type uploadStartedAtPathSpec struct { name string - uuid string + id string } func (uploadStartedAtPathSpec) pathSpec() {} @@ -437,10 +443,10 @@ func (uploadStartedAtPathSpec) pathSpec() {} // uploadHashStatePathSpec defines the path parameters for the file that stores // the hash function state of an upload at a specific byte offset. If `list` is // set, then the path mapper will generate a list prefix for all hash state -// offsets for the upload identified by the name, uuid, and alg. +// offsets for the upload identified by the name, id, and alg. type uploadHashStatePathSpec struct { name string - uuid string + id string alg string offset int64 list bool diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go index 7dff6e09..3d17b377 100644 --- a/docs/storage/paths_test.go +++ b/docs/storage/paths_test.go @@ -111,14 +111,14 @@ func TestPathMapper(t *testing.T) { { spec: uploadDataPathSpec{ name: "foo/bar", - uuid: "asdf-asdf-asdf-adsf", + id: "asdf-asdf-asdf-adsf", }, expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", }, { spec: uploadStartedAtPathSpec{ name: "foo/bar", - uuid: "asdf-asdf-asdf-adsf", + id: "asdf-asdf-asdf-adsf", }, expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", }, diff --git a/docs/storage/purgeuploads_test.go b/docs/storage/purgeuploads_test.go index 7c0f8813..d4408479 100644 --- a/docs/storage/purgeuploads_test.go +++ b/docs/storage/purgeuploads_test.go @@ -24,7 +24,7 @@ func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time. } func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { - dataPath, err := pm.path(uploadDataPathSpec{name: repo, uuid: uploadID}) + dataPath, err := pm.path(uploadDataPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } @@ -32,7 +32,7 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa t.Fatalf("Unable to write data file") } - startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, uuid: uploadID}) + startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } @@ -115,7 +115,7 @@ func TestPurgeOnlyUploads(t *testing.T) { // Create a directory tree outside _uploads and ensure // these files aren't deleted. 
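These purge tests exercise the policy implied by the startedat files: any upload whose timestamp is older than a cutoff is a candidate for deletion, and nothing outside _uploads may be touched. A small sketch of the age filter alone, with illustrative names and an in-memory map in place of the driver:

    package main

    import (
    	"fmt"
    	"time"
    )

    // staleUploads returns the IDs of uploads whose startedat timestamp is
    // older than the cutoff; everything else is left alone.
    func staleUploads(startedAt map[string]time.Time, cutoff time.Time) []string {
    	var stale []string
    	for id, when := range startedAt {
    		if when.Before(cutoff) {
    			stale = append(stale, id)
    		}
    	}
    	return stale
    }

    func main() {
    	now := time.Now().UTC()
    	uploads := map[string]time.Time{
    		"fresh-upload": now.Add(-10 * time.Minute),
    		"stale-upload": now.Add(-48 * time.Hour),
    	}

    	// Purge anything that has been in flight for more than a day.
    	fmt.Println(staleUploads(uploads, now.Add(-24*time.Hour))) // [stale-upload]
    }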
- dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", uuid: uuid.New()}) + dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.New()}) if err != nil { t.Fatalf(err.Error()) } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 2834e5eb..659c789e 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -2,38 +2,53 @@ package storage import ( "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" - "golang.org/x/net/context" ) // registry is the top-level implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { - driver storagedriver.StorageDriver - pm *pathMapper - blobStore *blobStore - layerInfoCache cache.LayerInfoCache + blobStore *blobStore + blobServer distribution.BlobServer + statter distribution.BlobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider } // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate. -func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, layerInfoCache cache.LayerInfoCache) distribution.Namespace { - bs := &blobStore{ +func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) distribution.Namespace { + + // create global statter, with cache. + var statter distribution.BlobStatter = &blobStatter{ driver: driver, pm: defaultPathMapper, - ctx: ctx, + } + + if blobDescriptorCacheProvider != nil { + statter = &cachedBlobStatter{ + cache: blobDescriptorCacheProvider, + backend: statter, + } + } + + bs := &blobStore{ + driver: driver, + pm: defaultPathMapper, + statter: statter, } return ®istry{ - driver: driver, blobStore: bs, - - // TODO(sday): This should be configurable. - pm: defaultPathMapper, - layerInfoCache: layerInfoCache, + blobServer: &blobServer{ + driver: driver, + statter: statter, + pathFn: bs.path, + }, + blobDescriptorCacheProvider: blobDescriptorCacheProvider, } } @@ -54,18 +69,29 @@ func (reg *registry) Repository(ctx context.Context, name string) (distribution. } } + var descriptorCache distribution.BlobDescriptorService + if reg.blobDescriptorCacheProvider != nil { + var err error + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(name) + if err != nil { + return nil, err + } + } + return &repository{ - ctx: ctx, - registry: reg, - name: name, + ctx: ctx, + registry: reg, + name: name, + descriptorCache: descriptorCache, }, nil } // repository provides name-scoped access to various services. type repository struct { *registry - ctx context.Context - name string + ctx context.Context + name string + descriptorCache distribution.BlobDescriptorService } // Name returns the name of the repository. @@ -78,47 +104,68 @@ func (repo *repository) Name() string { // to a request local. 
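NewRegistryWithDriver above layers statters: the driver-backed statter always works, and the cache provider, when present, is wrapped around it. A small cache-aside sketch of that composition; the cachedStatter type and interfaces are illustrative:

    package main

    import (
    	"errors"
    	"fmt"
    )

    type statter interface {
    	stat(dgst string) (int64, error) // returns blob length
    }

    // backendStatter stands in for the driver-backed statter.
    type backendStatter map[string]int64

    func (b backendStatter) stat(dgst string) (int64, error) {
    	length, ok := b[dgst]
    	if !ok {
    		return 0, errors.New("blob unknown")
    	}
    	fmt.Println("backend hit for", dgst)
    	return length, nil
    }

    // cachedStatter decorates another statter: serve from cache when
    // possible, otherwise fall through and remember the answer.
    type cachedStatter struct {
    	cache   map[string]int64
    	backend statter
    }

    func (c *cachedStatter) stat(dgst string) (int64, error) {
    	if length, ok := c.cache[dgst]; ok {
    		return length, nil
    	}
    	length, err := c.backend.stat(dgst)
    	if err != nil {
    		return 0, err // negative results are deliberately not cached
    	}
    	c.cache[dgst] = length
    	return length, nil
    }

    func main() {
    	var s statter = backendStatter{"sha256:aaaa": 42}
    	s = &cachedStatter{cache: map[string]int64{}, backend: s}

    	s.stat("sha256:aaaa")              // backend hit for sha256:aaaa
    	length, _ := s.stat("sha256:aaaa") // served from cache; no backend line
    	fmt.Println(length)                // 42
    }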
func (repo *repository) Manifests() distribution.ManifestService { return &manifestStore{ + ctx: repo.ctx, repository: repo, revisionStore: &revisionStore{ + ctx: repo.ctx, repository: repo, + blobStore: &linkedBlobStore{ + ctx: repo.ctx, + blobStore: repo.blobStore, + repository: repo, + statter: &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPath: manifestRevisionLinkPath, + }, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPath: manifestRevisionLinkPath, + }, }, tagStore: &tagStore{ + ctx: repo.ctx, repository: repo, + blobStore: repo.registry.blobStore, }, } } -// Layers returns an instance of the LayerService. Instantiation is cheap and +// Blobs returns an instance of the BlobStore. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. -func (repo *repository) Layers() distribution.LayerService { - ls := &layerStore{ +func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { + var statter distribution.BlobStatter = &linkedBlobStatter{ + blobStore: repo.blobStore, repository: repo, + linkPath: blobLinkPath, } - if repo.registry.layerInfoCache != nil { - // TODO(stevvooe): This is not the best place to setup a cache. We would - // really like to decouple the cache from the backend but also have the - // manifeset service use the layer service cache. For now, we can simply - // integrate the cache directly. The main issue is that we have layer - // access and layer data coupled in a single object. Work is already under - // way to decouple this. - - return &cachedLayerService{ - LayerService: ls, - repository: repo, - ctx: repo.ctx, - driver: repo.driver, - blobStore: repo.blobStore, - cache: repo.registry.layerInfoCache, + if repo.descriptorCache != nil { + statter = &cachedBlobStatter{ + cache: repo.descriptorCache, + backend: statter, } } - return ls + return &linkedBlobStore{ + blobStore: repo.blobStore, + blobServer: repo.blobServer, + statter: statter, + repository: repo, + ctx: ctx, + + // TODO(stevvooe): linkPath limits this blob store to only layers. + // This instance cannot be used for manifest checks. + linkPath: blobLinkPath, + } } func (repo *repository) Signatures() distribution.SignatureService { return &signatureStore{ repository: repo, + blobStore: repo.blobStore, + ctx: repo.ctx, } } diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go index 066ce972..9838bff2 100644 --- a/docs/storage/revisionstore.go +++ b/docs/storage/revisionstore.go @@ -3,8 +3,8 @@ package storage import ( "encoding/json" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" @@ -12,47 +12,56 @@ import ( // revisionStore supports storing and managing manifest revisions. type revisionStore struct { - *repository + repository *repository + blobStore *linkedBlobStore + ctx context.Context } -// exists returns true if the revision is available in the named repository. 
-func (rs *revisionStore) exists(revision digest.Digest) (bool, error) { - revpath, err := rs.pm.path(manifestRevisionPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return false, err +func newRevisionStore(ctx context.Context, repo *repository, blobStore *blobStore) *revisionStore { + return &revisionStore{ + ctx: ctx, + repository: repo, + blobStore: &linkedBlobStore{ + blobStore: blobStore, + repository: repo, + ctx: ctx, + linkPath: manifestRevisionLinkPath, + }, } - - exists, err := exists(rs.repository.ctx, rs.driver, revpath) - if err != nil { - return false, err - } - - return exists, nil } // get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, error) { +func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*manifest.SignedManifest, error) { // Ensure that this revision is available in this repository. - if exists, err := rs.exists(revision); err != nil { - return nil, err - } else if !exists { - return nil, distribution.ErrUnknownManifestRevision{ - Name: rs.Name(), - Revision: revision, + _, err := rs.blobStore.Stat(ctx, revision) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: rs.repository.Name(), + Revision: revision, + } } + + return nil, err } - content, err := rs.blobStore.get(revision) + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. + + content, err := rs.blobStore.Get(ctx, revision) if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: rs.repository.Name(), + Revision: revision, + } + } + return nil, err } // Fetch the signatures for the manifest - signatures, err := rs.Signatures().Get(revision) + signatures, err := rs.repository.Signatures().Get(revision) if err != nil { return nil, err } @@ -78,69 +87,34 @@ func (rs *revisionStore) get(revision digest.Digest) (*manifest.SignedManifest, // put stores the manifest in the repository, if not already present. Any // updated signatures will be stored, as well. -func (rs *revisionStore) put(sm *manifest.SignedManifest) (digest.Digest, error) { +func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) (distribution.Descriptor, error) { // Resolve the payload in the manifest. payload, err := sm.Payload() if err != nil { - return "", err + return distribution.Descriptor{}, err } // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.put(payload) + revision, err := rs.blobStore.Put(ctx, manifest.ManifestMediaType, payload) if err != nil { - logrus.Errorf("error putting payload into blobstore: %v", err) - return "", err + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return distribution.Descriptor{}, err } // Link the revision into the repository. - if err := rs.link(revision); err != nil { - return "", err + if err := rs.blobStore.linkBlob(ctx, revision); err != nil { + return distribution.Descriptor{}, err } // Grab each json signature and store them. 
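The rewritten get above also shows a boundary idiom: the generic ErrBlobUnknown from the blob store is translated into the domain-specific ErrManifestUnknownRevision before it escapes the manifest layer, while other failures pass through. A compact sketch of that error translation, with hypothetical names:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errBlobUnknown = errors.New("blob unknown")

    type errUnknownRevision struct {
    	name, revision string
    }

    func (e errUnknownRevision) Error() string {
    	return fmt.Sprintf("unknown manifest revision %s@%s", e.name, e.revision)
    }

    // blobGet stands in for the storage-level read.
    func blobGet(blobs map[string][]byte, dgst string) ([]byte, error) {
    	p, ok := blobs[dgst]
    	if !ok {
    		return nil, errBlobUnknown
    	}
    	return p, nil
    }

    // getRevision translates "unknown blob" into "unknown revision";
    // any other error is returned untouched.
    func getRevision(blobs map[string][]byte, repo, revision string) ([]byte, error) {
    	p, err := blobGet(blobs, revision)
    	if err != nil {
    		if errors.Is(err, errBlobUnknown) {
    			return nil, errUnknownRevision{name: repo, revision: revision}
    		}
    		return nil, err
    	}
    	return p, nil
    }

    func main() {
    	_, err := getRevision(map[string][]byte{}, "foo/bar", "sha256:aaaa")
    	fmt.Println(err) // unknown manifest revision foo/bar@sha256:aaaa
    }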
signatures, err := sm.Signatures() if err != nil { - return "", err + return distribution.Descriptor{}, err } - if err := rs.Signatures().Put(revision, signatures...); err != nil { - return "", err + if err := rs.repository.Signatures().Put(revision.Digest, signatures...); err != nil { + return distribution.Descriptor{}, err } return revision, nil } - -// link links the revision into the repository. -func (rs *revisionStore) link(revision digest.Digest) error { - revisionPath, err := rs.pm.path(manifestRevisionLinkPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return err - } - - if exists, err := exists(rs.repository.ctx, rs.driver, revisionPath); err != nil { - return err - } else if exists { - // Revision has already been linked! - return nil - } - - return rs.blobStore.link(revisionPath, revision) -} - -// delete removes the specified manifest revision from storage. -func (rs *revisionStore) delete(revision digest.Digest) error { - revisionPath, err := rs.pm.path(manifestRevisionPathSpec{ - name: rs.Name(), - revision: revision, - }) - - if err != nil { - return err - } - - return rs.driver.Delete(rs.repository.ctx, revisionPath) -} diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index fcf6224f..f6c23e27 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -10,14 +10,24 @@ import ( ) type signatureStore struct { - *repository + repository *repository + blobStore *blobStore + ctx context.Context +} + +func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobStore) *signatureStore { + return &signatureStore{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + } } var _ distribution.SignatureService = &signatureStore{} func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { - signaturesPath, err := s.pm.path(manifestSignaturesPathSpec{ - name: s.Name(), + signaturesPath, err := s.blobStore.pm.path(manifestSignaturesPathSpec{ + name: s.repository.Name(), revision: dgst, }) @@ -30,7 +40,7 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { // can be eliminated by implementing listAll on drivers. signaturesPath = path.Join(signaturesPath, "sha256") - signaturePaths, err := s.driver.List(s.repository.ctx, signaturesPath) + signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) if err != nil { return nil, err } @@ -43,27 +53,32 @@ func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { } ch := make(chan result) + bs := s.linkedBlobStore(s.ctx, dgst) for i, sigPath := range signaturePaths { - // Append the link portion - sigPath = path.Join(sigPath, "link") + sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) + if err != nil { + context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) + continue + } wg.Add(1) - go func(idx int, sigPath string) { + go func(idx int, sigdgst digest.Digest) { defer wg.Done() context.GetLogger(s.ctx). - Debugf("fetching signature from %q", sigPath) + Debugf("fetching signature %q", sigdgst) r := result{index: idx} - if p, err := s.blobStore.linked(sigPath); err != nil { + + if p, err := bs.Get(s.ctx, sigdgst); err != nil { context.GetLogger(s.ctx). 
-				Errorf("error fetching signature from %q: %v", sigPath, err)
+				Errorf("error fetching signature %q: %v", sigdgst, err)
 				r.err = err
 			} else {
 				r.signature = p
 			}
 
 			ch <- r
-		}(i, sigPath)
+		}(i, sigdgst)
 	}
 	done := make(chan struct{})
 	go func() {
@@ -91,25 +106,36 @@ loop:
 }
 
 func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
+	bs := s.linkedBlobStore(s.ctx, dgst)
 	for _, signature := range signatures {
-		signatureDigest, err := s.blobStore.put(signature)
-		if err != nil {
-			return err
-		}
-
-		signaturePath, err := s.pm.path(manifestSignatureLinkPathSpec{
-			name:      s.Name(),
-			revision:  dgst,
-			signature: signatureDigest,
-		})
-
-		if err != nil {
-			return err
-		}
-
-		if err := s.blobStore.link(signaturePath, signatureDigest); err != nil {
+		if _, err := bs.Put(s.ctx, "application/json", signature); err != nil {
 			return err
 		}
 	}
 	return nil
 }
+
+// linkedBlobStore returns the linkedBlobStore of the signatures for the
+// manifest with the given digest. Effectively, each signature link path
+// layout is a unique linked blob store.
+func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore {
+	linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
+		return pm.path(manifestSignatureLinkPathSpec{
+			name:      name,
+			revision:  revision,
+			signature: dgst,
+		})
+	}
+
+	return &linkedBlobStore{
+		ctx:        ctx,
+		repository: s.repository,
+		blobStore:  s.blobStore,
+		statter: &linkedBlobStatter{
+			blobStore:  s.blobStore,
+			repository: s.repository,
+			linkPath:   linkpath,
+		},
+		linkPath: linkpath,
+	}
+}
diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go
index 882e6c35..a74d9b09 100644
--- a/docs/storage/tagstore.go
+++ b/docs/storage/tagstore.go
@@ -4,31 +4,33 @@ import (
 	"path"
 
 	"github.com/docker/distribution"
-	// "github.com/docker/distribution/context"
+	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 )
 
 // tagStore provides methods to manage manifest tags in a backend storage driver.
 type tagStore struct {
-	*repository
+	repository *repository
+	blobStore  *blobStore
+	ctx        context.Context
 }
 
 // tags lists the manifest tags for the specified repository.
 func (ts *tagStore) tags() ([]string, error) {
-	p, err := ts.pm.path(manifestTagPathSpec{
-		name: ts.name,
+	p, err := ts.blobStore.pm.path(manifestTagPathSpec{
+		name: ts.repository.Name(),
 	})
 	if err != nil {
 		return nil, err
 	}
 
 	var tags []string
-	entries, err := ts.driver.List(ts.repository.ctx, p)
+	entries, err := ts.blobStore.driver.List(ts.ctx, p)
 	if err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError:
-			return nil, distribution.ErrRepositoryUnknown{Name: ts.name}
+			return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()}
 		default:
 			return nil, err
 		}
@@ -45,15 +47,15 @@ func (ts *tagStore) tags() ([]string, error) {
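The signatureStore.linkedBlobStore helper above is the template for how this series reuses one store implementation across several on-disk namespaces: linkedBlobStore is parameterized by a linkPath function, so layers, manifest revisions, signatures and (below) tag indexes differ only in the closure they pass in. A sketch of the shape, using the manifest revision spec already shown in repository.go (the linkPathFunc type name is illustrative; the field is still declared inline at this point in the series):

// linkPathFunc maps (name, digest) to the link file that anchors a blob
// inside a particular namespace.
type linkPathFunc func(pm *pathMapper, name string, dgst digest.Digest) (string, error)

// A revision-namespace linkPath, equivalent to manifestRevisionLinkPath.
func revisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
	return pm.path(manifestRevisionLinkPathSpec{
		name:     name,
		revision: dgst,
	})
}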
 
 // exists returns true if the specified manifest tag exists in the repository.
 func (ts *tagStore) exists(tag string) (bool, error) {
-	tagPath, err := ts.pm.path(manifestTagCurrentPathSpec{
-		name: ts.Name(),
+	tagPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+		name: ts.repository.Name(),
 		tag:  tag,
 	})
 	if err != nil {
 		return false, err
 	}
 
-	exists, err := exists(ts.repository.ctx, ts.driver, tagPath)
+	exists, err := exists(ts.ctx, ts.blobStore.driver, tagPath)
 	if err != nil {
 		return false, err
 	}
@@ -64,18 +66,8 @@ func (ts *tagStore) exists(tag string) (bool, error) {
 // tag tags the digest with the given tag, updating the store to point at
 // the current tag. The digest must point to a manifest.
 func (ts *tagStore) tag(tag string, revision digest.Digest) error {
-	indexEntryPath, err := ts.pm.path(manifestTagIndexEntryLinkPathSpec{
-		name:     ts.Name(),
-		tag:      tag,
-		revision: revision,
-	})
-
-	if err != nil {
-		return err
-	}
-
-	currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{
-		name: ts.Name(),
+	currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+		name: ts.repository.Name(),
 		tag:  tag,
 	})
 
@@ -83,77 +75,69 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error {
 		return err
 	}
 
+	nbs := ts.linkedBlobStore(ts.ctx, tag)
 	// Link into the index
-	if err := ts.blobStore.link(indexEntryPath, revision); err != nil {
+	if err := nbs.linkBlob(ts.ctx, distribution.Descriptor{Digest: revision}); err != nil {
 		return err
 	}
 
 	// Overwrite the current link
-	return ts.blobStore.link(currentPath, revision)
+	return ts.blobStore.link(ts.ctx, currentPath, revision)
 }
 
 // resolve the current revision for name and tag.
 func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
-	currentPath, err := ts.pm.path(manifestTagCurrentPathSpec{
-		name: ts.Name(),
+	currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+		name: ts.repository.Name(),
 		tag:  tag,
 	})
-
 	if err != nil {
 		return "", err
 	}
 
-	if exists, err := exists(ts.repository.ctx, ts.driver, currentPath); err != nil {
-		return "", err
-	} else if !exists {
-		return "", distribution.ErrManifestUnknown{Name: ts.Name(), Tag: tag}
-	}
-
-	revision, err := ts.blobStore.readlink(currentPath)
+	revision, err := ts.blobStore.readlink(ts.ctx, currentPath)
 	if err != nil {
+		switch err.(type) {
+		case storagedriver.PathNotFoundError:
+			return "", distribution.ErrManifestUnknown{Name: ts.repository.Name(), Tag: tag}
+		}
+
 		return "", err
 	}
 
 	return revision, nil
}
 
-// revisions returns all revisions with the specified name and tag.
-func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) {
-	manifestTagIndexPath, err := ts.pm.path(manifestTagIndexPathSpec{
-		name: ts.Name(),
-		tag:  tag,
-	})
-
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO(stevvooe): Need to append digest alg to get listing of revisions.
-	manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256")
-
-	entries, err := ts.driver.List(ts.repository.ctx, manifestTagIndexPath)
-	if err != nil {
-		return nil, err
-	}
-
-	var revisions []digest.Digest
-	for _, entry := range entries {
-		revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry)))
-	}
-
-	return revisions, nil
-}
-
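A tag therefore leaves two kinds of links behind: a current link that resolve reads back via readlink, and one index entry per tagged revision, written through the tag-scoped linked blob store defined just below. For tag latest pointing at sha256:abc..., the layout is roughly as follows (illustrative; the exact paths come from the pathMapper specs):

	_manifests/tags/latest/current/link
	_manifests/tags/latest/index/sha256/abc.../link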
 // delete removes the tag from repository, including the history of all
 // revisions that have the specified tag.
 func (ts *tagStore) delete(tag string) error {
-	tagPath, err := ts.pm.path(manifestTagPathSpec{
-		name: ts.Name(),
+	tagPath, err := ts.blobStore.pm.path(manifestTagPathSpec{
+		name: ts.repository.Name(),
 		tag:  tag,
 	})
 	if err != nil {
 		return err
 	}
 
-	return ts.driver.Delete(ts.repository.ctx, tagPath)
+	return ts.blobStore.driver.Delete(ts.ctx, tagPath)
+}
+
+// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
+// to index manifest blobs by tag name. While the tag store doesn't map
+// precisely to the linked blob store, using this ensures the links are
+// managed via the same code path.
+func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
+	return &linkedBlobStore{
+		blobStore:  ts.blobStore,
+		repository: ts.repository,
+		ctx:        ctx,
+		linkPath: func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
+			return pm.path(manifestTagIndexEntryLinkPathSpec{
+				name:     name,
+				tag:      tag,
+				revision: dgst,
+			})
+		},
+	}
+}
diff --git a/docs/storage/util.go b/docs/storage/util.go
new file mode 100644
index 00000000..773d7ba0
--- /dev/null
+++ b/docs/storage/util.go
@@ -0,0 +1,21 @@
+package storage
+
+import (
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+// exists provides a utility method to test whether or not a path exists in
+// the given driver.
+func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
+	if _, err := drv.Stat(ctx, path); err != nil {
+		switch err := err.(type) {
+		case driver.PathNotFoundError:
+			return false, nil
+		default:
+			return false, err
+		}
+	}
+
+	return true, nil
+}

From 4c8e4dc373e7cd61524751829bc58106e80b8cb5 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Fri, 17 Apr 2015 13:32:51 -0700
Subject: [PATCH 106/501] Add client implementation of distribution interface

Adds functionality to create a Repository client which connects to a
remote endpoint.

Signed-off-by: Derek McGowan (github: dmcgowan)
---
 docs/client/authchallenge.go   | 150 ++++++++
 docs/client/endpoint.go        | 266 +++++++++++++
 docs/client/errors.go          |  37 ++
 docs/client/repository.go      | 657 +++++++++++++++++++++++++++++++++
 docs/client/repository_test.go | 605 ++++++++++++++++++++++++++++++
 docs/client/token.go           |  78 ++++
 6 files changed, 1793 insertions(+)
 create mode 100644 docs/client/authchallenge.go
 create mode 100644 docs/client/endpoint.go
 create mode 100644 docs/client/repository.go
 create mode 100644 docs/client/repository_test.go
 create mode 100644 docs/client/token.go

diff --git a/docs/client/authchallenge.go b/docs/client/authchallenge.go
new file mode 100644
index 00000000..0485f42d
--- /dev/null
+++ b/docs/client/authchallenge.go
@@ -0,0 +1,150 @@
+package client
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+type octetType byte
+
+// AuthorizationChallenge carries information
+// from a WWW-Authenticate response header.
+type AuthorizationChallenge struct {
+	Scheme     string
+	Parameters map[string]string
+}
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?"
| "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +func parseAuthHeader(header http.Header) []AuthorizationChallenge { + var challenges []AuthorizationChallenge + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, AuthorizationChallenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + i; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/docs/client/endpoint.go b/docs/client/endpoint.go new file mode 100644 index 00000000..83d3d991 --- /dev/null +++ b/docs/client/endpoint.go @@ -0,0 +1,266 @@ +package client + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/v2" +) + +// Authorizer is used to apply Authorization to an HTTP request +type Authorizer interface { + // Authorizer updates an HTTP request with the needed authorization + Authorize(req *http.Request) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) +} + +// RepositoryEndpoint represents a single host endpoint serving up +// the distribution API. 
+type RepositoryEndpoint struct { + Endpoint string + Mirror bool + + Header http.Header + Credentials CredentialStore + + ub *v2.URLBuilder +} + +type nullAuthorizer struct{} + +func (na nullAuthorizer) Authorize(req *http.Request) error { + return nil +} + +type repositoryTransport struct { + Transport http.RoundTripper + Header http.Header + Authorizer Authorizer +} + +func (rt *repositoryTransport) RoundTrip(req *http.Request) (*http.Response, error) { + reqCopy := new(http.Request) + *reqCopy = *req + + // Copy existing headers then static headers + reqCopy.Header = make(http.Header, len(req.Header)+len(rt.Header)) + for k, s := range req.Header { + reqCopy.Header[k] = append([]string(nil), s...) + } + for k, s := range rt.Header { + reqCopy.Header[k] = append(reqCopy.Header[k], s...) + } + + if rt.Authorizer != nil { + if err := rt.Authorizer.Authorize(reqCopy); err != nil { + return nil, err + } + } + + logrus.Debugf("HTTP: %s %s", req.Method, req.URL) + + if rt.Transport != nil { + return rt.Transport.RoundTrip(reqCopy) + } + return http.DefaultTransport.RoundTrip(reqCopy) +} + +type authTransport struct { + Transport http.RoundTripper + Header http.Header +} + +func (rt *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { + reqCopy := new(http.Request) + *reqCopy = *req + + // Copy existing headers then static headers + reqCopy.Header = make(http.Header, len(req.Header)+len(rt.Header)) + for k, s := range req.Header { + reqCopy.Header[k] = append([]string(nil), s...) + } + for k, s := range rt.Header { + reqCopy.Header[k] = append(reqCopy.Header[k], s...) + } + + logrus.Debugf("HTTP: %s %s", req.Method, req.URL) + + if rt.Transport != nil { + return rt.Transport.RoundTrip(reqCopy) + } + return http.DefaultTransport.RoundTrip(reqCopy) +} + +// URLBuilder returns a new URL builder +func (e *RepositoryEndpoint) URLBuilder() (*v2.URLBuilder, error) { + if e.ub == nil { + var err error + e.ub, err = v2.NewURLBuilderFromString(e.Endpoint) + if err != nil { + return nil, err + } + } + + return e.ub, nil +} + +// HTTPClient returns a new HTTP client configured for this endpoint +func (e *RepositoryEndpoint) HTTPClient(name string) (*http.Client, error) { + transport := &repositoryTransport{ + Header: e.Header, + } + client := &http.Client{ + Transport: transport, + } + + challenges, err := e.ping(client) + if err != nil { + return nil, err + } + actions := []string{"pull"} + if !e.Mirror { + actions = append(actions, "push") + } + + transport.Authorizer = &endpointAuthorizer{ + client: &http.Client{Transport: &authTransport{Header: e.Header}}, + challenges: challenges, + creds: e.Credentials, + resource: "repository", + scope: name, + actions: actions, + } + + return client, nil +} + +func (e *RepositoryEndpoint) ping(client *http.Client) ([]AuthorizationChallenge, error) { + ub, err := e.URLBuilder() + if err != nil { + return nil, err + } + u, err := ub.BuildBaseURL() + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + req.Header = make(http.Header, len(e.Header)) + for k, s := range e.Header { + req.Header[k] = append([]string(nil), s...) 
+ } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var supportsV2 bool +HeaderLoop: + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { + for _, versionName := range strings.Fields(supportedVersions) { + if versionName == "registry/2.0" { + supportsV2 = true + break HeaderLoop + } + } + } + + if !supportsV2 { + return nil, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e.Endpoint) + } + + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. + return parseAuthHeader(resp.Header), nil + } else if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unable to get valid ping response: %d", resp.StatusCode) + } + + return nil, nil +} + +type endpointAuthorizer struct { + client *http.Client + challenges []AuthorizationChallenge + creds CredentialStore + + resource string + scope string + actions []string + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time +} + +func (ta *endpointAuthorizer) Authorize(req *http.Request) error { + token, err := ta.getToken() + if err != nil { + return err + } + if token != "" { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + } else if ta.creds != nil { + username, password := ta.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + } + } + return nil +} + +func (ta *endpointAuthorizer) getToken() (string, error) { + ta.tokenLock.Lock() + defer ta.tokenLock.Unlock() + now := time.Now() + if now.Before(ta.tokenExpiration) { + //log.Debugf("Using cached token for %q", ta.auth.Username) + return ta.tokenCache, nil + } + + for _, challenge := range ta.challenges { + switch strings.ToLower(challenge.Scheme) { + case "basic": + // no token necessary + case "bearer": + //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) + params := map[string]string{} + for k, v := range challenge.Parameters { + params[k] = v + } + params["scope"] = fmt.Sprintf("%s:%s:%s", ta.resource, ta.scope, strings.Join(ta.actions, ",")) + token, err := getToken(ta.creds, params, ta.client) + if err != nil { + return "", err + } + ta.tokenCache = token + ta.tokenExpiration = now.Add(time.Minute) + + return token, nil + default: + //log.Infof("Unsupported auth scheme: %q", challenge.Scheme) + } + } + + // Do not expire cache since there are no challenges which use a token + ta.tokenExpiration = time.Now().Add(time.Hour * 24) + + return "", nil +} diff --git a/docs/client/errors.go b/docs/client/errors.go index 3e89e674..4ef2cc23 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -1,9 +1,14 @@ package client import ( + "bytes" + "encoding/json" "fmt" + "io/ioutil" + "net/http" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" ) // RepositoryNotFoundError is returned when making an operation against a @@ -77,3 +82,35 @@ type UnexpectedHTTPStatusError struct { func (e *UnexpectedHTTPStatusError) Error() string { return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) } + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. 
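One detail of getToken above worth noting: the scope parameter sent to the token server is assembled as resource:name:actions, so for a non-mirror endpoint on repository foo/bar the request carries (example values, following the Sprintf in getToken):

	repository:foo/bar:pull,push

and the resulting token is cached for a minute before another challenge round trip is made.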
+type UnexpectedHTTPResponseError struct { + ParseErr error + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + shortenedResponse := string(e.Response) + if len(shortenedResponse) > 15 { + shortenedResponse = shortenedResponse[:12] + "..." + } + return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), shortenedResponse) +} + +func parseHTTPErrorResponse(response *http.Response) error { + var errors v2.Errors + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return err + } + decoder := json.NewDecoder(bytes.NewReader(body)) + err = decoder.Decode(&errors) + if err != nil { + return &UnexpectedHTTPResponseError{ + ParseErr: err, + Response: body, + } + } + return &errors +} diff --git a/docs/client/repository.go b/docs/client/repository.go new file mode 100644 index 00000000..a96390fa --- /dev/null +++ b/docs/client/repository.go @@ -0,0 +1,657 @@ +package client + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "time" + + ctxu "github.com/docker/distribution/context" + + "github.com/docker/distribution/manifest" + + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/v2" + "golang.org/x/net/context" +) + +// NewRepositoryClient creates a new Repository for the given repository name and endpoint +func NewRepositoryClient(ctx context.Context, name string, endpoint *RepositoryEndpoint) (distribution.Repository, error) { + if err := v2.ValidateRespositoryName(name); err != nil { + return nil, err + } + + ub, err := endpoint.URLBuilder() + if err != nil { + return nil, err + } + + client, err := endpoint.HTTPClient(name) + if err != nil { + return nil, err + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + mirror: endpoint.Mirror, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name string + mirror bool +} + +func (r *repository) Name() string { + return r.name +} + +func (r *repository) Layers() distribution.LayerService { + return &layers{ + repository: r, + } +} + +func (r *repository) Manifests() distribution.ManifestService { + return &manifests{ + repository: r, + } +} + +func (r *repository) Signatures() distribution.SignatureService { + return &signatures{ + repository: r, + } +} + +type signatures struct { + *repository +} + +func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { + panic("not implemented") +} + +func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { + panic("not implemented") +} + +type manifests struct { + *repository +} + +func (ms *manifests) Tags() ([]string, error) { + panic("not implemented") +} + +func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { + return ms.ExistsByTag(dgst.String()) +} + +func (ms *manifests) ExistsByTag(tag string) (bool, error) { + u, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return false, err + } + + resp, err := ms.client.Head(u) + if err != nil { + return false, err + } + + switch { + case resp.StatusCode == http.StatusOK: + return true, nil + case resp.StatusCode == http.StatusNotFound: + return false, nil + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return false, parseHTTPErrorResponse(resp) + default: + return false, &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (ms *manifests) Get(dgst digest.Digest) 
(*manifest.SignedManifest, error) { + return ms.GetByTag(dgst.String()) +} + +func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) { + u, err := ms.ub.BuildManifestURL(ms.name, tag) + if err != nil { + return nil, err + } + + resp, err := ms.client.Get(u) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode == http.StatusOK: + var sm manifest.SignedManifest + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&sm); err != nil { + return nil, err + } + + return &sm, nil + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return nil, parseHTTPErrorResponse(resp) + default: + return nil, &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (ms *manifests) Put(m *manifest.SignedManifest) error { + manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) + if err != nil { + return err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) + if err != nil { + return err + } + + resp, err := ms.client.Do(putRequest) + if err != nil { + return err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode == http.StatusAccepted: + // TODO(dmcgowan): Use or check digest header + return nil + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return parseHTTPErrorResponse(resp) + default: + return &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (ms *manifests) Delete(dgst digest.Digest) error { + u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode == http.StatusOK: + return nil + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return parseHTTPErrorResponse(resp) + default: + return &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +type layers struct { + *repository +} + +func sanitizeLocation(location, source string) (string, error) { + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + if locationURL.Scheme == "" { + sourceURL, err := url.Parse(source) + if err != nil { + return "", err + } + locationURL = &url.URL{ + Scheme: sourceURL.Scheme, + Host: sourceURL.Host, + Path: location, + } + location = locationURL.String() + } + return location, nil +} + +func (ls *layers) Exists(dgst digest.Digest) (bool, error) { + _, err := ls.fetchLayer(dgst) + if err != nil { + switch err := err.(type) { + case distribution.ErrUnknownLayer: + return false, nil + default: + return false, err + } + } + + return true, nil +} + +func (ls *layers) Fetch(dgst digest.Digest) (distribution.Layer, error) { + return ls.fetchLayer(dgst) +} + +func (ls *layers) Upload() (distribution.LayerUpload, error) { + u, err := ls.ub.BuildBlobUploadURL(ls.name) + + resp, err := ls.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode == http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpLayerUpload{ + layers: ls, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return nil, parseHTTPErrorResponse(resp) + default: + return nil, 
&UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (ls *layers) Resume(uuid string) (distribution.LayerUpload, error) { + panic("not implemented") +} + +func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) { + u, err := ls.ub.BuildBlobURL(ls.name, dgst) + if err != nil { + return nil, err + } + + resp, err := ls.client.Head(u) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode == http.StatusOK: + lengthHeader := resp.Header.Get("Content-Length") + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing content-length: %v", err) + } + + var t time.Time + lastModified := resp.Header.Get("Last-Modified") + if lastModified != "" { + t, err = http.ParseTime(lastModified) + if err != nil { + return nil, fmt.Errorf("error parsing last-modified: %v", err) + } + } + + return &httpLayer{ + layers: ls, + size: length, + digest: dgst, + createdAt: t, + }, nil + case resp.StatusCode == http.StatusNotFound: + return nil, distribution.ErrUnknownLayer{ + FSLayer: manifest.FSLayer{ + BlobSum: dgst, + }, + } + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return nil, parseHTTPErrorResponse(resp) + default: + return nil, &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +type httpLayer struct { + *layers + + size int64 + digest digest.Digest + createdAt time.Time + + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 + err error +} + +func (hl *httpLayer) CreatedAt() time.Time { + return hl.createdAt +} + +func (hl *httpLayer) Digest() digest.Digest { + return hl.digest +} + +func (hl *httpLayer) Read(p []byte) (n int, err error) { + if hl.err != nil { + return 0, hl.err + } + + rd, err := hl.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hl.offset += int64(n) + + // Simulate io.EOR error if we reach filesize. + if err == nil && hl.offset >= hl.size { + err = io.EOF + } + + return n, err +} + +func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { + if hl.err != nil { + return 0, hl.err + } + + var err error + newOffset := hl.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = hl.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + if hl.offset != newOffset { + hl.reset() + } + + // No problems, set the offset. + hl.offset = newOffset + } + + return hl.offset, err +} + +func (hl *httpLayer) Close() error { + if hl.err != nil { + return hl.err + } + + // close and release reader chain + if hl.rc != nil { + hl.rc.Close() + } + + hl.rc = nil + hl.brd = nil + + hl.err = fmt.Errorf("httpLayer: closed") + + return nil +} + +func (hl *httpLayer) reset() { + if hl.err != nil { + return + } + if hl.rc != nil { + hl.rc.Close() + hl.rc = nil + } +} + +func (hl *httpLayer) reader() (io.Reader, error) { + if hl.err != nil { + return nil, hl.err + } + + if hl.rc != nil { + return hl.brd, nil + } + + // If the offset is great than or equal to size, return a empty, noop reader. 
+ if hl.offset >= hl.size { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + + blobURL, err := hl.ub.BuildBlobURL(hl.name, hl.digest) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", blobURL, nil) + if err != nil { + return nil, err + } + + if hl.offset > 0 { + // TODO(stevvooe): Get this working correctly. + + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("1-")) + ctxu.GetLogger(hl.context).Infof("Range: %s", req.Header.Get("Range")) + } + + resp, err := hl.client.Do(req) + if err != nil { + return nil, err + } + + switch { + case resp.StatusCode == 200: + hl.rc = resp.Body + default: + defer resp.Body.Close() + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + if hl.brd == nil { + hl.brd = bufio.NewReader(hl.rc) + } else { + hl.brd.Reset(hl.rc) + } + + return hl.brd, nil +} + +func (hl *httpLayer) Length() int64 { + return hl.size +} + +func (hl *httpLayer) Handler(r *http.Request) (http.Handler, error) { + panic("Not implemented") +} + +type httpLayerUpload struct { + *layers + + uuid string + startedAt time.Time + + location string // always the last value of the location header. + offset int64 + closed bool +} + +var _ distribution.LayerUpload = &httpLayerUpload{} + +func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hlu.location, r) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hlu.client.Do(req) + if err != nil { + return 0, err + } + + switch { + case resp.StatusCode == http.StatusAccepted: + // TODO(dmcgowan): Validate headers + hlu.uuid = resp.Header.Get("Docker-Upload-UUID") + hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + case resp.StatusCode == http.StatusNotFound: + return 0, &BlobUploadNotFoundError{Location: hlu.location} + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return 0, parseHTTPErrorResponse(resp) + default: + return 0, &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hlu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hlu.offset, hlu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hlu.client.Do(req) + if err != nil { + return 0, err + } + + switch { + case resp.StatusCode == http.StatusAccepted: + // TODO(dmcgowan): Validate headers + hlu.uuid = resp.Header.Get("Docker-Upload-UUID") + hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + case resp.StatusCode == http.StatusNotFound: + return 0, &BlobUploadNotFoundError{Location: hlu.location} + case resp.StatusCode 
>= 400 && resp.StatusCode < 500: + return 0, parseHTTPErrorResponse(resp) + default: + return 0, &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { + newOffset := hlu.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + return newOffset, errors.New("Cannot seek from end on incomplete upload") + case os.SEEK_SET: + newOffset = int64(offset) + } + + hlu.offset = newOffset + + return hlu.offset, nil +} + +func (hlu *httpLayerUpload) UUID() string { + return hlu.uuid +} + +func (hlu *httpLayerUpload) StartedAt() time.Time { + return hlu.startedAt +} + +func (hlu *httpLayerUpload) Finish(digest digest.Digest) (distribution.Layer, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hlu.location, nil) + if err != nil { + return nil, err + } + + values := req.URL.Query() + values.Set("digest", digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hlu.client.Do(req) + if err != nil { + return nil, err + } + + switch { + case resp.StatusCode == http.StatusCreated: + return hlu.Layers().Fetch(digest) + case resp.StatusCode == http.StatusNotFound: + return nil, &BlobUploadNotFoundError{Location: hlu.location} + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return nil, parseHTTPErrorResponse(resp) + default: + return nil, &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (hlu *httpLayerUpload) Cancel() error { + panic("not implemented") +} + +func (hlu *httpLayerUpload) Close() error { + hlu.closed = true + return nil +} diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go new file mode 100644 index 00000000..67138db6 --- /dev/null +++ b/docs/client/repository_test.go @@ -0,0 +1,605 @@ +package client + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "testing" + "time" + + "code.google.com/p/go-uuid/uuid" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/testutil" + "golang.org/x/net/context" +) + +func testServer(rrm testutil.RequestResponseMap) (*RepositoryEndpoint, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + e := RepositoryEndpoint{Endpoint: s.URL, Mirror: false} + return &e, s.Close +} + +func newRandomBlob(size int) (digest.Digest, []byte) { + b := make([]byte, size) + if n, err := rand.Read(b); err != nil { + panic(err) + } else if n != size { + panic("unable to read enough bytes") + } + + dgst, err := digest.FromBytes(b) + if err != nil { + panic(err) + } + + return dgst, b +} + +func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": 
{fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) +} + +func addPing(m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }) +} + +func TestLayerFetch(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + addPing(&m) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), "test.example.com/repo1", e) + if err != nil { + t.Fatal(err) + } + l := r.Layers() + + layer, err := l.Fetch(d1) + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(layer) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b, b1) != 0 { + t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) + } + + // TODO(dmcgowan): Test error cases +} + +func TestLayerExists(t *testing.T) { + d1, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addTestFetch("test.example.com/repo1", d1, b1, &m) + addPing(&m) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), "test.example.com/repo1", e) + if err != nil { + t.Fatal(err) + } + l := r.Layers() + + ok, err := l.Exists(d1) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatalf("Blob does not exist: %s", d1) + } + + // TODO(dmcgowan): Test error cases +} + +func TestLayerUploadChunked(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addPing(&m) + chunks := [][]byte{ + b1[0:256], + b1[256:512], + b1[512:513], + b1[513:1024], + } + repo := "test.example.com/uploadrepo" + uuids := []string{uuid.New()} + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[0]}, + "Docker-Upload-UUID": {uuids[0]}, + "Range": {"0-0"}, + }), + }, + }) + offset := 0 + for i, chunk := range chunks { + uuids = append(uuids, uuid.New()) + newOffset := offset + len(chunk) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo + "/blobs/uploads/" + uuids[i], + Body: chunk, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]}, + "Docker-Upload-UUID": {uuids[i+1]}, + "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, + }), + }, + }) + offset = newOffset + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1], + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: 
"/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(offset)}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), repo, e) + if err != nil { + t.Fatal(err) + } + l := r.Layers() + + upload, err := l.Upload() + if err != nil { + t.Fatal(err) + } + + if upload.UUID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.UUID(), uuids[0]) + } + + for _, chunk := range chunks { + n, err := upload.Write(chunk) + if err != nil { + t.Fatal(err) + } + if n != len(chunk) { + t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) + } + } + + layer, err := upload.Finish(dgst) + if err != nil { + t.Fatal(err) + } + + if layer.Length() != int64(len(b1)) { + t.Fatalf("Unexpected layer size: %d; expected: %d", layer.Length(), len(b1)) + } +} + +func TestLayerUploadMonolithic(t *testing.T) { + dgst, b1 := newRandomBlob(1024) + var m testutil.RequestResponseMap + addPing(&m) + repo := "test.example.com/uploadrepo" + uploadID := uuid.New() + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Range": {"0-0"}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PATCH", + Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Body: b1, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Docker-Upload-UUID": {uploadID}, + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + QueryParams: map[string][]string{ + "digest": {dgst.String()}, + }, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(b1))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), repo, e) + if err != nil { + t.Fatal(err) + } + l := r.Layers() + + upload, err := l.Upload() + if err != nil { + t.Fatal(err) + } + + if upload.UUID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.UUID(), uploadID) + } + + n, err := upload.ReadFrom(bytes.NewReader(b1)) + if err != nil { + t.Fatal(err) + } + if n != int64(len(b1)) { + t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) 
+ } + + layer, err := upload.Finish(dgst) + if err != nil { + t.Fatal(err) + } + + if layer.Length() != int64(len(b1)) { + t.Fatalf("Unexpected layer size: %d; expected: %d", layer.Length(), len(b1)) + } +} + +func TestLayerUploadResume(t *testing.T) { + // TODO(dmcgowan): implement +} + +func newRandomSchema1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { + blobs := make([]manifest.FSLayer, blobCount) + history := make([]manifest.History, blobCount) + + for i := 0; i < blobCount; i++ { + dgst, blob := newRandomBlob((i % 5) * 16) + + blobs[i] = manifest.FSLayer{BlobSum: dgst} + history[i] = manifest.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + } + + m := &manifest.SignedManifest{ + Manifest: manifest.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + }, + } + manifestBytes, err := json.Marshal(m) + if err != nil { + panic(err) + } + dgst, err := digest.FromBytes(manifestBytes) + if err != nil { + panic(err) + } + + m.Raw = manifestBytes + + return m, dgst +} + +func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + +} + +func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { + if m1.Name != m2.Name { + return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) + } + if m1.Tag != m2.Tag { + return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) + } + if len(m1.FSLayers) != len(m2.FSLayers) { + return fmt.Errorf("fs layer length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + } + for i := range m1.FSLayers { + if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { + return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) + } + } + if len(m1.History) != len(m2.History) { + return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) + } + for i := range m1.History { + if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { + return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) + } + } + return nil +} + +func TestManifestFetch(t *testing.T) { + repo := "test.example.com/repo" + m1, dgst := newRandomSchema1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addPing(&m) + addTestManifest(repo, dgst.String(), m1.Raw, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), repo, e) + if err != nil { + t.Fatal(err) + } + ms := r.Manifests() + + ok, err := ms.Exists(dgst) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not 
exist") + } + + manifest, err := ms.Get(dgst) + if err != nil { + t.Fatal(err) + } + if err := checkEqualManifest(manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestFetchByTag(t *testing.T) { + repo := "test.example.com/repo/by/tag" + m1, _ := newRandomSchema1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addPing(&m) + addTestManifest(repo, "latest", m1.Raw, &m) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), repo, e) + if err != nil { + t.Fatal(err) + } + + ms := r.Manifests() + ok, err := ms.ExistsByTag("latest") + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("Manifest does not exist") + } + + manifest, err := ms.GetByTag("latest") + if err != nil { + t.Fatal(err) + } + if err := checkEqualManifest(manifest, m1); err != nil { + t.Fatal(err) + } +} + +func TestManifestDelete(t *testing.T) { + repo := "test.example.com/repo/delete" + _, dgst1 := newRandomSchema1Manifest(repo, "latest", 6) + _, dgst2 := newRandomSchema1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + addPing(&m) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo + "/manifests/" + dgst1.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), repo, e) + if err != nil { + t.Fatal(err) + } + + ms := r.Manifests() + if err := ms.Delete(dgst1); err != nil { + t.Fatal(err) + } + if err := ms.Delete(dgst2); err == nil { + t.Fatal("Expected error deleting unknown manifest") + } + // TODO(dmcgowan): Check for specific unknown error +} + +func TestManifestPut(t *testing.T) { + repo := "test.example.com/repo/delete" + m1, dgst := newRandomSchema1Manifest(repo, "other", 6) + var m testutil.RequestResponseMap + addPing(&m) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo + "/manifests/other", + Body: m1.Raw, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepositoryClient(context.Background(), repo, e) + if err != nil { + t.Fatal(err) + } + + ms := r.Manifests() + if err := ms.Put(m1); err != nil { + t.Fatal(err) + } + + // TODO(dmcgowan): Check for error cases +} diff --git a/docs/client/token.go b/docs/client/token.go new file mode 100644 index 00000000..6439e01e --- /dev/null +++ b/docs/client/token.go @@ -0,0 +1,78 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +type tokenResponse struct { + Token string `json:"token"` +} + +func getToken(creds CredentialStore, params map[string]string, client *http.Client) (token string, err error) { + realm, ok := params["realm"] + if !ok { + return "", errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return "", fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + // TODO(dmcgowan): Handle empty scheme + + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return "", err + } + + reqParams := req.URL.Query() + service := params["service"] + scope := params["scope"] + + if service 
!= "" { + reqParams.Add("service", service) + } + + for _, scopeField := range strings.Fields(scope) { + reqParams.Add("scope", scopeField) + } + + if creds != nil { + username, password := creds.Basic(realmURL) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + decoder := json.NewDecoder(resp.Body) + + tr := new(tokenResponse) + if err = decoder.Decode(tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.Token == "" { + return "", errors.New("authorization server did not include a token in the response") + } + + return tr.Token, nil +} From 837a12db15700bccef76463ad95f79301d821e33 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 6 May 2015 11:12:33 -0700 Subject: [PATCH 107/501] Remove deprecated client interface Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/client.go | 573 ------------------------------------- docs/client/client_test.go | 440 ---------------------------- docs/client/objectstore.go | 239 ---------------- docs/client/pull.go | 151 ---------- docs/client/push.go | 137 --------- 5 files changed, 1540 deletions(-) delete mode 100644 docs/client/client.go delete mode 100644 docs/client/client_test.go delete mode 100644 docs/client/objectstore.go delete mode 100644 docs/client/pull.go delete mode 100644 docs/client/push.go diff --git a/docs/client/client.go b/docs/client/client.go deleted file mode 100644 index 36be960d..00000000 --- a/docs/client/client.go +++ /dev/null @@ -1,573 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "regexp" - "strconv" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/v2" -) - -// Client implements the client interface to the registry http api -type Client interface { - // GetImageManifest returns an image manifest for the image at the given - // name, tag pair. - GetImageManifest(name, tag string) (*manifest.SignedManifest, error) - - // PutImageManifest uploads an image manifest for the image at the given - // name, tag pair. - PutImageManifest(name, tag string, imageManifest *manifest.SignedManifest) error - - // DeleteImage removes the image at the given name, tag pair. - DeleteImage(name, tag string) error - - // ListImageTags returns a list of all image tags with the given repository - // name. - ListImageTags(name string) ([]string, error) - - // BlobLength returns the length of the blob stored at the given name, - // digest pair. - // Returns a length value of -1 on error or if the blob does not exist. - BlobLength(name string, dgst digest.Digest) (int, error) - - // GetBlob returns the blob stored at the given name, digest pair in the - // form of an io.ReadCloser with the length of this blob. - // A nonzero byteOffset can be provided to receive a partial blob beginning - // at the given offset. 
- GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) - - // InitiateBlobUpload starts a blob upload in the given repository namespace - // and returns a unique location url to use for other blob upload methods. - InitiateBlobUpload(name string) (string, error) - - // GetBlobUploadStatus returns the byte offset and length of the blob at the - // given upload location. - GetBlobUploadStatus(location string) (int, int, error) - - // UploadBlob uploads a full blob to the registry. - UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error - - // UploadBlobChunk uploads a blob chunk with a given length and startByte to - // the registry. - // FinishChunkedBlobUpload must be called to finalize this upload. - UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error - - // FinishChunkedBlobUpload completes a chunked blob upload at a given - // location. - FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error - - // CancelBlobUpload deletes all content at the unfinished blob upload - // location and invalidates any future calls to this blob upload. - CancelBlobUpload(location string) error -} - -var ( - patternRangeHeader = regexp.MustCompile("bytes=0-(\\d+)/(\\d+)") -) - -// New returns a new Client which operates against a registry with the -// given base endpoint -// This endpoint should not include /v2/ or any part of the url after this. -func New(endpoint string) (Client, error) { - ub, err := v2.NewURLBuilderFromString(endpoint) - if err != nil { - return nil, err - } - - return &clientImpl{ - endpoint: endpoint, - ub: ub, - }, nil -} - -// clientImpl is the default implementation of the Client interface -type clientImpl struct { - endpoint string - ub *v2.URLBuilder -} - -// TODO(bbland): use consistent route generation between server and client - -func (r *clientImpl) GetImageManifest(name, tag string) (*manifest.SignedManifest, error) { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return nil, err - } - - response, err := http.Get(manifestURL) - if err != nil { - return nil, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - break - case response.StatusCode == http.StatusNotFound: - return nil, &ImageManifestNotFoundError{Name: name, Tag: tag} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, err - } - return nil, &errs - default: - return nil, &UnexpectedHTTPStatusError{Status: response.Status} - } - - decoder := json.NewDecoder(response.Body) - - manifest := new(manifest.SignedManifest) - err = decoder.Decode(manifest) - if err != nil { - return nil, err - } - return manifest, nil -} - -func (r *clientImpl) PutImageManifest(name, tag string, manifest *manifest.SignedManifest) error { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(manifest.Raw)) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK || response.StatusCode == http.StatusAccepted: - return nil - case 
response.StatusCode >= 400 && response.StatusCode < 500: - var errors v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errors) - if err != nil { - return err - } - - return &errors - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) DeleteImage(name, tag string) error { - manifestURL, err := r.ub.BuildManifestURL(name, tag) - if err != nil { - return err - } - - deleteRequest, err := http.NewRequest("DELETE", manifestURL, nil) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(deleteRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - break - case response.StatusCode == http.StatusNotFound: - return &ImageManifestNotFoundError{Name: name, Tag: tag} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } - - return nil -} - -func (r *clientImpl) ListImageTags(name string) ([]string, error) { - tagsURL, err := r.ub.BuildTagsURL(name) - if err != nil { - return nil, err - } - - response, err := http.Get(tagsURL) - if err != nil { - return nil, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - break - case response.StatusCode == http.StatusNotFound: - return nil, &RepositoryNotFoundError{Name: name} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, err - } - return nil, &errs - default: - return nil, &UnexpectedHTTPStatusError{Status: response.Status} - } - - tags := struct { - Tags []string `json:"tags"` - }{} - - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&tags) - if err != nil { - return nil, err - } - - return tags.Tags, nil -} - -func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { - blobURL, err := r.ub.BuildBlobURL(name, dgst) - if err != nil { - return -1, err - } - - response, err := http.Head(blobURL) - if err != nil { - return -1, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - lengthHeader := response.Header.Get("Content-Length") - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return -1, err - } - return int(length), nil - case response.StatusCode == http.StatusNotFound: - return -1, nil - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return -1, err - } - return -1, &errs - default: - return -1, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { - blobURL, err := r.ub.BuildBlobURL(name, dgst) - if err != nil { - return nil, 0, err - } - - getRequest, err := http.NewRequest("GET", blobURL, nil) - if err != nil { - return nil, 0, err - } - - getRequest.Header.Add("Range", fmt.Sprintf("%d-", byteOffset)) - response, err := 
http.DefaultClient.Do(getRequest) - if err != nil { - return nil, 0, err - } - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusOK: - lengthHeader := response.Header.Get("Content-Length") - length, err := strconv.ParseInt(lengthHeader, 10, 0) - if err != nil { - return nil, 0, err - } - return response.Body, int(length), nil - case response.StatusCode == http.StatusNotFound: - response.Body.Close() - return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return nil, 0, err - } - return nil, 0, &errs - default: - response.Body.Close() - return nil, 0, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { - uploadURL, err := r.ub.BuildBlobUploadURL(name) - if err != nil { - return "", err - } - - postRequest, err := http.NewRequest("POST", uploadURL, nil) - if err != nil { - return "", err - } - - response, err := http.DefaultClient.Do(postRequest) - if err != nil { - return "", err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusAccepted: - return response.Header.Get("Location"), nil - // case response.StatusCode == http.StatusNotFound: - // return - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return "", err - } - return "", &errs - default: - return "", &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { - response, err := http.Get(location) - if err != nil { - return 0, 0, err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - return parseRangeHeader(response.Header.Get("Range")) - case response.StatusCode == http.StatusNotFound: - return 0, 0, &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return 0, 0, err - } - return 0, 0, &errs - default: - return 0, 0, &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error { - defer blob.Close() - - putRequest, err := http.NewRequest("PUT", location, blob) - if err != nil { - return err - } - - values := putRequest.URL.Query() - values.Set("digest", dgst.String()) - putRequest.URL.RawQuery = values.Encode() - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", fmt.Sprint(length)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusCreated: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := 
json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error { - defer blobChunk.Close() - - putRequest, err := http.NewRequest("PUT", location, blobChunk) - if err != nil { - return err - } - - endByte := startByte + length - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", fmt.Sprint(length)) - putRequest.Header.Set("Content-Range", - fmt.Sprintf("%d-%d/%d", startByte, endByte, endByte)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusAccepted: - return nil - case response.StatusCode == http.StatusRequestedRangeNotSatisfiable: - lastValidRange, blobSize, err := parseRangeHeader(response.Header.Get("Range")) - if err != nil { - return err - } - return &BlobUploadInvalidRangeError{ - Location: location, - LastValidRange: lastValidRange, - BlobSize: blobSize, - } - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error { - putRequest, err := http.NewRequest("PUT", location, nil) - if err != nil { - return err - } - - values := putRequest.URL.Query() - values.Set("digest", dgst.String()) - putRequest.URL.RawQuery = values.Encode() - - putRequest.Header.Set("Content-Type", "application/octet-stream") - putRequest.Header.Set("Content-Length", "0") - putRequest.Header.Set("Content-Range", - fmt.Sprintf("%d-%d/%d", length, length, length)) - - response, err := http.DefaultClient.Do(putRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusCreated: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -func (r *clientImpl) CancelBlobUpload(location string) error { - deleteRequest, err := http.NewRequest("DELETE", location, nil) - if err != nil { - return err - } - - response, err := http.DefaultClient.Do(deleteRequest) - if err != nil { - return err - } - defer response.Body.Close() - - // TODO(bbland): handle other status codes, like 5xx errors - switch { - case response.StatusCode == http.StatusNoContent: - return nil - case response.StatusCode == http.StatusNotFound: - return &BlobUploadNotFoundError{Location: location} - case response.StatusCode >= 400 && response.StatusCode < 500: - var errs v2.Errors - decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errs) - if err != nil { - return 
err - } - return &errs - default: - return &UnexpectedHTTPStatusError{Status: response.Status} - } -} - -// parseRangeHeader parses out the offset and length from a returned Range -// header -func parseRangeHeader(byteRangeHeader string) (int, int, error) { - submatches := patternRangeHeader.FindStringSubmatch(byteRangeHeader) - if submatches == nil || len(submatches) < 3 { - return 0, 0, fmt.Errorf("Malformed Range header") - } - - offset, err := strconv.Atoi(submatches[1]) - if err != nil { - return 0, 0, err - } - length, err := strconv.Atoi(submatches[2]) - if err != nil { - return 0, 0, err - } - return offset, length, nil -} diff --git a/docs/client/client_test.go b/docs/client/client_test.go deleted file mode 100644 index 2c1d1cc2..00000000 --- a/docs/client/client_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "sync" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/testutil" -) - -type testBlob struct { - digest digest.Digest - contents []byte -} - -func TestRangeHeaderParser(t *testing.T) { - const ( - malformedRangeHeader = "bytes=0-A/C" - emptyRangeHeader = "" - rFirst = 100 - rSecond = 200 - ) - - var ( - wellformedRangeHeader = fmt.Sprintf("bytes=0-%d/%d", rFirst, rSecond) - ) - - if _, _, err := parseRangeHeader(malformedRangeHeader); err == nil { - t.Fatalf("malformedRangeHeader: error expected, got nil") - } - - if _, _, err := parseRangeHeader(emptyRangeHeader); err == nil { - t.Fatalf("emptyRangeHeader: error expected, got nil") - } - - first, second, err := parseRangeHeader(wellformedRangeHeader) - if err != nil { - t.Fatalf("wellformedRangeHeader: unexpected error %v", err) - } - - if first != rFirst || second != rSecond { - t.Fatalf("Range has been parsed unproperly: %d/%d", first, second) - } - -} - -func TestPush(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: "tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - uploadLocations := make([]string, len(testBlobs)) - blobs := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, blob := range testBlobs { - // TODO(bbland): this is returning the same location for all uploads, - // because we can't know which blob will get which location. - // It's sort of okay because we're using unique digests, but this needs - // to change at some point. 
- uploadLocations[i] = fmt.Sprintf("/v2/%s/blobs/test-uuid", name) - blobs[i] = manifest.FSLayer{BlobSum: blob.digest} - history[i] = manifest.History{V1Compatibility: blob.digest.String()} - } - - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - }, - } - var err error - m.Raw, err = json.Marshal(m) - - blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) - for i, blob := range testBlobs { - blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "POST", - Route: "/v2/" + name + "/blobs/uploads/", - }, - Response: testutil.Response{ - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Location": {uploadLocations[i]}, - }), - }, - } - blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: uploadLocations[i], - QueryParams: map[string][]string{ - "digest": {blob.digest.String()}, - }, - Body: blob.contents, - }, - Response: testutil.Response{ - StatusCode: http.StatusCreated, - }, - } - } - - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + name + "/manifests/" + tag, - Body: m.Raw, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - }, - })) - var server *httptest.Server - - // HACK(stevvooe): Super hack to follow: the request response map approach - // above does not let us correctly format the location header to the - // server url. This handler intercepts and re-writes the location header - // to the server url. 
- - hack := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w = &headerInterceptingResponseWriter{ResponseWriter: w, serverURL: server.URL} - handler.ServeHTTP(w, r) - }) - - server = httptest.NewServer(hack) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - writer, err := l.Writer() - if err != nil { - t.Fatal(err) - } - - writer.SetSize(len(blob.contents)) - writer.Write(blob.contents) - writer.Close() - } - - objectStore.WriteManifest(name, tag, m) - - err = Push(client, objectStore, name, tag) - if err != nil { - t.Fatal(err) - } -} - -func TestPull(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: "tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - blobs := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, blob := range testBlobs { - blobs[i] = manifest.FSLayer{BlobSum: blob.digest} - history[i] = manifest.History{V1Compatibility: blob.digest.String()} - } - - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - }, - } - manifestBytes, err := json.Marshal(m) - - blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs)) - for i, blob := range testBlobs { - blobRequestResponseMappings[i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents, - }, - } - } - - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifests/" + tag, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: manifestBytes, - }, - })) - server := httptest.NewServer(handler) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - err = Pull(client, objectStore, name, tag) - if err != nil { - t.Fatal(err) - } - - m, err = objectStore.Manifest(name, tag) - if err != nil { - t.Fatal(err) - } - - mBytes, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - - if string(mBytes) != string(manifestBytes) { - t.Fatal("Incorrect manifest") - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - reader, err := l.Reader() - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - blobBytes, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err) - } - - if string(blobBytes) != string(blob.contents) { - t.Fatal("Incorrect blob") - } - } -} - -func TestPullResume(t *testing.T) { - name := "hello/world" - tag := "sometag" - testBlobs := []testBlob{ - { - digest: 
"tarsum.v2+sha256:12345", - contents: []byte("some contents"), - }, - { - digest: "tarsum.v2+sha256:98765", - contents: []byte("some other contents"), - }, - } - layers := make([]manifest.FSLayer, len(testBlobs)) - history := make([]manifest.History, len(testBlobs)) - - for i, layer := range testBlobs { - layers[i] = manifest.FSLayer{BlobSum: layer.digest} - history[i] = manifest.History{V1Compatibility: layer.digest.String()} - } - - m := &manifest.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: layers, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - } - manifestBytes, err := json.Marshal(m) - - layerRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) - for i, blob := range testBlobs { - layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents[:len(blob.contents)/2], - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(blob.contents))}, - }), - }, - } - layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/blobs/" + blob.digest.String(), - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: blob.contents[len(blob.contents)/2:], - }, - } - } - - for i := 0; i < 3; i++ { - layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifests/" + tag, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: manifestBytes, - }, - }) - } - - handler := testutil.NewHandler(layerRequestResponseMappings) - server := httptest.NewServer(handler) - client, err := New(server.URL) - if err != nil { - t.Fatalf("error creating client: %v", err) - } - objectStore := &memoryObjectStore{ - mutex: new(sync.Mutex), - manifestStorage: make(map[string]*manifest.SignedManifest), - layerStorage: make(map[digest.Digest]Layer), - } - - for attempts := 0; attempts < 3; attempts++ { - err = Pull(client, objectStore, name, tag) - if err == nil { - break - } - } - - if err != nil { - t.Fatal(err) - } - - sm, err := objectStore.Manifest(name, tag) - if err != nil { - t.Fatal(err) - } - - mBytes, err := json.Marshal(sm) - if err != nil { - t.Fatal(err) - } - - if string(mBytes) != string(manifestBytes) { - t.Fatal("Incorrect manifest") - } - - for _, blob := range testBlobs { - l, err := objectStore.Layer(blob.digest) - if err != nil { - t.Fatal(err) - } - - reader, err := l.Reader() - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - layerBytes, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatal(err) - } - - if string(layerBytes) != string(blob.contents) { - t.Fatal("Incorrect blob") - } - } -} - -// headerInterceptingResponseWriter is a hacky workaround to re-write the -// location header to have the server url. 
-type headerInterceptingResponseWriter struct { - http.ResponseWriter - serverURL string -} - -func (hirw *headerInterceptingResponseWriter) WriteHeader(status int) { - location := hirw.Header().Get("Location") - if location != "" { - hirw.Header().Set("Location", hirw.serverURL+location) - } - - hirw.ResponseWriter.WriteHeader(status) -} diff --git a/docs/client/objectstore.go b/docs/client/objectstore.go deleted file mode 100644 index 5969c9d2..00000000 --- a/docs/client/objectstore.go +++ /dev/null @@ -1,239 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -var ( - // ErrLayerAlreadyExists is returned when attempting to create a layer with - // a tarsum that is already in use. - ErrLayerAlreadyExists = fmt.Errorf("Layer already exists") - - // ErrLayerLocked is returned when attempting to write to a layer which is - // currently being written to. - ErrLayerLocked = fmt.Errorf("Layer locked") -) - -// ObjectStore is an interface which is designed to approximate the docker -// engine storage. This interface is subject to change to conform to the -// future requirements of the engine. -type ObjectStore interface { - // Manifest retrieves the image manifest stored at the given repository name - // and tag - Manifest(name, tag string) (*manifest.SignedManifest, error) - - // WriteManifest stores an image manifest at the given repository name and - // tag - WriteManifest(name, tag string, manifest *manifest.SignedManifest) error - - // Layer returns a handle to a layer for reading and writing - Layer(dgst digest.Digest) (Layer, error) -} - -// Layer is a generic image layer interface. -// A Layer may not be written to if it is already complete. -type Layer interface { - // Reader returns a LayerReader or an error if the layer has not been - // written to or is currently being written to. - Reader() (LayerReader, error) - - // Writer returns a LayerWriter or an error if the layer has been fully - // written to or is currently being written to. - Writer() (LayerWriter, error) - - // Wait blocks until the Layer can be read from. - Wait() error -} - -// LayerReader is a read-only handle to a Layer, which exposes the CurrentSize -// and full Size in addition to implementing the io.ReadCloser interface. -type LayerReader interface { - io.ReadCloser - - // CurrentSize returns the number of bytes written to the underlying Layer - CurrentSize() int - - // Size returns the full size of the underlying Layer - Size() int -} - -// LayerWriter is a write-only handle to a Layer, which exposes the CurrentSize -// and full Size in addition to implementing the io.WriteCloser interface. -// SetSize must be called on this LayerWriter before it can be written to. -type LayerWriter interface { - io.WriteCloser - - // CurrentSize returns the number of bytes written to the underlying Layer - CurrentSize() int - - // Size returns the full size of the underlying Layer - Size() int - - // SetSize sets the full size of the underlying Layer. 
- // This must be called before any calls to Write - SetSize(int) error -} - -// memoryObjectStore is an in-memory implementation of the ObjectStore interface -type memoryObjectStore struct { - mutex *sync.Mutex - manifestStorage map[string]*manifest.SignedManifest - layerStorage map[digest.Digest]Layer -} - -func (objStore *memoryObjectStore) Manifest(name, tag string) (*manifest.SignedManifest, error) { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - manifest, ok := objStore.manifestStorage[name+":"+tag] - if !ok { - return nil, fmt.Errorf("No manifest found with Name: %q, Tag: %q", name, tag) - } - return manifest, nil -} - -func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *manifest.SignedManifest) error { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - objStore.manifestStorage[name+":"+tag] = manifest - return nil -} - -func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) { - objStore.mutex.Lock() - defer objStore.mutex.Unlock() - - layer, ok := objStore.layerStorage[dgst] - if !ok { - layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))} - objStore.layerStorage[dgst] = layer - } - - return layer, nil -} - -type memoryLayer struct { - cond *sync.Cond - contents []byte - expectedSize int - writing bool -} - -func (ml *memoryLayer) Reader() (LayerReader, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents == nil { - return nil, fmt.Errorf("Layer has not been written to yet") - } - if ml.writing { - return nil, ErrLayerLocked - } - - return &memoryLayerReader{ml: ml, reader: bytes.NewReader(ml.contents)}, nil -} - -func (ml *memoryLayer) Writer() (LayerWriter, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents != nil { - if ml.writing { - return nil, ErrLayerLocked - } - if ml.expectedSize == len(ml.contents) { - return nil, ErrLayerAlreadyExists - } - } else { - ml.contents = make([]byte, 0) - } - - ml.writing = true - return &memoryLayerWriter{ml: ml, buffer: bytes.NewBuffer(ml.contents)}, nil -} - -func (ml *memoryLayer) Wait() error { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.contents == nil { - return fmt.Errorf("No writer to wait on") - } - - for ml.writing { - ml.cond.Wait() - } - - return nil -} - -type memoryLayerReader struct { - ml *memoryLayer - reader *bytes.Reader -} - -func (mlr *memoryLayerReader) Read(p []byte) (int, error) { - return mlr.reader.Read(p) -} - -func (mlr *memoryLayerReader) Close() error { - return nil -} - -func (mlr *memoryLayerReader) CurrentSize() int { - return len(mlr.ml.contents) -} - -func (mlr *memoryLayerReader) Size() int { - return mlr.ml.expectedSize -} - -type memoryLayerWriter struct { - ml *memoryLayer - buffer *bytes.Buffer -} - -func (mlw *memoryLayerWriter) Write(p []byte) (int, error) { - if mlw.ml.expectedSize == 0 { - return 0, fmt.Errorf("Must set size before writing to layer") - } - wrote, err := mlw.buffer.Write(p) - mlw.ml.contents = mlw.buffer.Bytes() - return wrote, err -} - -func (mlw *memoryLayerWriter) Close() error { - mlw.ml.cond.L.Lock() - defer mlw.ml.cond.L.Unlock() - - return mlw.close() -} - -func (mlw *memoryLayerWriter) close() error { - mlw.ml.writing = false - mlw.ml.cond.Broadcast() - return nil -} - -func (mlw *memoryLayerWriter) CurrentSize() int { - return len(mlw.ml.contents) -} - -func (mlw *memoryLayerWriter) Size() int { - return mlw.ml.expectedSize -} - -func (mlw *memoryLayerWriter) SetSize(size int) error { - if !mlw.ml.writing { - return fmt.Errorf("Layer is closed for 
writing") - } - mlw.ml.expectedSize = size - return nil -} diff --git a/docs/client/pull.go b/docs/client/pull.go deleted file mode 100644 index 385158db..00000000 --- a/docs/client/pull.go +++ /dev/null @@ -1,151 +0,0 @@ -package client - -import ( - "fmt" - "io" - - log "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/manifest" -) - -// simultaneousLayerPullWindow is the size of the parallel layer pull window. -// A layer may not be pulled until the layer preceeding it by the length of the -// pull window has been successfully pulled. -const simultaneousLayerPullWindow = 4 - -// Pull implements a client pull workflow for the image defined by the given -// name and tag pair, using the given ObjectStore for local manifest and layer -// storage -func Pull(c Client, objectStore ObjectStore, name, tag string) error { - manifest, err := c.GetImageManifest(name, tag) - if err != nil { - return err - } - log.WithField("manifest", manifest).Info("Pulled manifest") - - if len(manifest.FSLayers) != len(manifest.History) { - return fmt.Errorf("Length of history not equal to number of layers") - } - if len(manifest.FSLayers) == 0 { - return fmt.Errorf("Image has no layers") - } - - errChans := make([]chan error, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - errChans[i] = make(chan error) - } - - // To avoid leak of goroutines we must notify - // pullLayer goroutines about a cancelation, - // otherwise they will lock forever. - cancelCh := make(chan struct{}) - - // Iterate over each layer in the manifest, simultaneously pulling no more - // than simultaneousLayerPullWindow layers at a time. If an error is - // received from a layer pull, we abort the push. - for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ { - dependentLayer := i - simultaneousLayerPullWindow - if dependentLayer >= 0 { - err := <-errChans[dependentLayer] - if err != nil { - log.WithField("error", err).Warn("Pull aborted") - close(cancelCh) - return err - } - } - - if i < len(manifest.FSLayers) { - go func(i int) { - select { - case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]): - case <-cancelCh: // no chance to recv until cancelCh's closed - } - }(i) - } - } - - err = objectStore.WriteManifest(name, tag, manifest) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "manifest": manifest, - }).Warn("Unable to write image manifest") - return err - } - - return nil -} - -func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { - log.WithField("layer", fsLayer).Info("Pulling layer") - - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - - layerWriter, err := layer.Writer() - if err == ErrLayerAlreadyExists { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - if err == ErrLayerLocked { - log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") - layer.Wait() - return nil - } - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - defer layerWriter.Close() - - if layerWriter.CurrentSize() > 0 { - log.WithFields(log.Fields{ - "layer": fsLayer, - "currentSize": layerWriter.CurrentSize(), - "size": layerWriter.Size(), - }).Info("Layer partially downloaded, resuming") - } - - layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, 
layerWriter.CurrentSize()) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to download layer") - return err - } - defer layerReader.Close() - - layerWriter.SetSize(layerWriter.CurrentSize() + length) - - _, err = io.Copy(layerWriter, layerReader) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to download layer") - return err - } - if layerWriter.CurrentSize() != layerWriter.Size() { - log.WithFields(log.Fields{ - "size": layerWriter.Size(), - "currentSize": layerWriter.CurrentSize(), - "layer": fsLayer, - }).Warn("Layer invalid size") - return fmt.Errorf( - "Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d", - fsLayer, layerWriter.Size(), layerWriter.CurrentSize(), - ) - } - return nil -} diff --git a/docs/client/push.go b/docs/client/push.go deleted file mode 100644 index c26bd174..00000000 --- a/docs/client/push.go +++ /dev/null @@ -1,137 +0,0 @@ -package client - -import ( - "fmt" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/manifest" -) - -// simultaneousLayerPushWindow is the size of the parallel layer push window. -// A layer may not be pushed until the layer preceeding it by the length of the -// push window has been successfully pushed. -const simultaneousLayerPushWindow = 4 - -type pushFunction func(fsLayer manifest.FSLayer) error - -// Push implements a client push workflow for the image defined by the given -// name and tag pair, using the given ObjectStore for local manifest and layer -// storage -func Push(c Client, objectStore ObjectStore, name, tag string) error { - manifest, err := objectStore.Manifest(name, tag) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "name": name, - "tag": tag, - }).Info("No image found") - return err - } - - errChans := make([]chan error, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - errChans[i] = make(chan error) - } - - cancelCh := make(chan struct{}) - - // Iterate over each layer in the manifest, simultaneously pushing no more - // than simultaneousLayerPushWindow layers at a time. If an error is - // received from a layer push, we abort the push. 
- for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ { - dependentLayer := i - simultaneousLayerPushWindow - if dependentLayer >= 0 { - err := <-errChans[dependentLayer] - if err != nil { - log.WithField("error", err).Warn("Push aborted") - close(cancelCh) - return err - } - } - - if i < len(manifest.FSLayers) { - go func(i int) { - select { - case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]): - case <-cancelCh: // recv broadcast notification about cancelation - } - }(i) - } - } - - err = c.PutImageManifest(name, tag, manifest) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "manifest": manifest, - }).Warn("Unable to upload manifest") - return err - } - - return nil -} - -func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer manifest.FSLayer) error { - log.WithField("layer", fsLayer).Info("Pushing layer") - - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - - layerReader, err := layer.Reader() - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - defer layerReader.Close() - - if layerReader.CurrentSize() != layerReader.Size() { - log.WithFields(log.Fields{ - "layer": fsLayer, - "currentSize": layerReader.CurrentSize(), - "size": layerReader.Size(), - }).Warn("Local layer incomplete") - return fmt.Errorf("Local layer incomplete") - } - - length, err := c.BlobLength(name, fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to check existence of remote layer") - return err - } - if length >= 0 { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - - location, err := c.InitiateBlobUpload(name) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - err = c.UploadBlob(location, layerReader, int(layerReader.CurrentSize()), fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - return nil -} From 03e0873125a724b26a1ed2f6f037720598c9d3c1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 7 May 2015 13:16:52 -0700 Subject: [PATCH 108/501] Add unit tests for auth challenge and endpoint Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/authchallenge.go | 2 +- docs/client/authchallenge_test.go | 37 +++++ docs/client/endpoint.go | 2 + docs/client/endpoint_test.go | 259 ++++++++++++++++++++++++++++++ docs/client/repository.go | 4 +- docs/client/repository_test.go | 16 +- 6 files changed, 309 insertions(+), 11 deletions(-) create mode 100644 docs/client/authchallenge_test.go create mode 100644 docs/client/endpoint_test.go diff --git a/docs/client/authchallenge.go b/docs/client/authchallenge.go index 0485f42d..f45704b1 100644 --- a/docs/client/authchallenge.go +++ b/docs/client/authchallenge.go @@ -127,7 +127,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) { p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true - for i = i + i; i < len(s); i++ { + for i = i + 1; i < len(s); i++ { b := s[i] switch { case escape: diff --git a/docs/client/authchallenge_test.go b/docs/client/authchallenge_test.go new file mode 100644 index 00000000..bb3016ee --- /dev/null +++ 
b/docs/client/authchallenge_test.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/http" + "testing" +) + +func TestAuthChallengeParse(t *testing.T) { + header := http.Header{} + header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) + + challenges := parseAuthHeader(header) + if len(challenges) != 1 { + t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) + } + + if expected := "bearer"; challenges[0].Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenges[0].Scheme, expected) + } + + if expected := "https://auth.example.com/token"; challenges[0].Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["realm"], expected) + } + + if expected := "registry.example.com"; challenges[0].Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["service"], expected) + } + + if expected := "fun"; challenges[0].Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["other"], expected) + } + + if expected := "he\"llo"; challenges[0].Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["slashed"], expected) + } + +} diff --git a/docs/client/endpoint.go b/docs/client/endpoint.go index 83d3d991..9889dc66 100644 --- a/docs/client/endpoint.go +++ b/docs/client/endpoint.go @@ -117,6 +117,8 @@ func (e *RepositoryEndpoint) URLBuilder() (*v2.URLBuilder, error) { // HTTPClient returns a new HTTP client configured for this endpoint func (e *RepositoryEndpoint) HTTPClient(name string) (*http.Client, error) { + // TODO(dmcgowan): create http.Transport + transport := &repositoryTransport{ Header: e.Header, } diff --git a/docs/client/endpoint_test.go b/docs/client/endpoint_test.go new file mode 100644 index 00000000..42bdc357 --- /dev/null +++ b/docs/client/endpoint_test.go @@ -0,0 +1,259 @@ +package client + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/distribution/testutil" +) + +type testAuthenticationWrapper struct { + headers http.Header + authCheck func(string) bool + next http.Handler +} + +func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if auth == "" || !w.authCheck(auth) { + h := rw.Header() + for k, values := range w.headers { + h[k] = values + } + rw.WriteHeader(http.StatusUnauthorized) + return + } + w.next.ServeHTTP(rw, r) +} + +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (*RepositoryEndpoint, func()) { + h := testutil.NewHandler(rrm) + wrapper := &testAuthenticationWrapper{ + + headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + "WWW-Authenticate": {authenticate}, + }), + authCheck: authCheck, + next: h, + } + + s := httptest.NewServer(wrapper) + e := RepositoryEndpoint{Endpoint: s.URL, Mirror: false} + return &e, s.Close +} + +type testCredentialStore struct { + username string + password string +} + +func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { + return tcs.username, tcs.password +} + +func TestEndpointAuthorizeToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := 
fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"badtoken"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te.Endpoint+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + client, err := e.HTTPClient(repo1) + if err != nil { + t.Fatalf("Error creating http client: %s", err) + } + + req, _ := http.NewRequest("GET", e.Endpoint+"/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + badCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e2, c2 := testServerWithAuth(m, authenicate, badCheck) + defer c2() + + client2, err := e2.HTTPClient(repo2) + if err != nil { + t.Fatalf("Error creating http client: %s", err) + } + + req, _ = http.NewRequest("GET", e.Endpoint+"/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func TestEndpointAuthorizeTokenBasic(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken"}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + basicCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te.Endpoint+"/token", service) + bearerCheck := func(a string) bool 
{ + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + e.Credentials = &testCredentialStore{ + username: username, + password: password, + } + + client, err := e.HTTPClient(repo) + if err != nil { + t.Fatalf("Error creating http client: %s", err) + } + + req, _ := http.NewRequest("GET", e.Endpoint+"/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} + +func TestEndpointAuthorizeBasic(t *testing.T) { + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + username := "user1" + password := "funSecretPa$$word" + authenicate := fmt.Sprintf("Basic realm=localhost") + validCheck := func(a string) bool { + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + e.Credentials = &testCredentialStore{ + username: username, + password: password, + } + + client, err := e.HTTPClient("test/repo/basic") + if err != nil { + t.Fatalf("Error creating http client: %s", err) + } + + req, _ := http.NewRequest("GET", e.Endpoint+"/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } +} diff --git a/docs/client/repository.go b/docs/client/repository.go index a96390fa..578c3fca 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -25,8 +25,8 @@ import ( "golang.org/x/net/context" ) -// NewRepositoryClient creates a new Repository for the given repository name and endpoint -func NewRepositoryClient(ctx context.Context, name string, endpoint *RepositoryEndpoint) (distribution.Repository, error) { +// NewRepository creates a new Repository for the given repository name and endpoint +func NewRepository(ctx context.Context, name string, endpoint *RepositoryEndpoint) (distribution.Repository, error) { if err := v2.ValidateRespositoryName(name); err != nil { return nil, err } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 67138db6..b96c52e5 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -97,7 +97,7 @@ func TestLayerFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), "test.example.com/repo1", e) + r, err := NewRepository(context.Background(), "test.example.com/repo1", e) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestLayerExists(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), "test.example.com/repo1", e) + r, err := NewRepository(context.Background(), "test.example.com/repo1", e) if err != nil { t.Fatal(err) } @@ -227,7 +227,7 @@ func TestLayerUploadChunked(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e) if err != nil { t.Fatal(err) } @@ -334,7 +334,7 @@ func TestLayerUploadMonolithic(t *testing.T) { e, c := testServer(m) defer c() - r, err := 
NewRepositoryClient(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e) if err != nil { t.Fatal(err) } @@ -475,7 +475,7 @@ func TestManifestFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e) if err != nil { t.Fatal(err) } @@ -508,7 +508,7 @@ func TestManifestFetchByTag(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e) if err != nil { t.Fatal(err) } @@ -553,7 +553,7 @@ func TestManifestDelete(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e) if err != nil { t.Fatal(err) } @@ -591,7 +591,7 @@ func TestManifestPut(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepositoryClient(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e) if err != nil { t.Fatal(err) } From 07cee840a424e81933c7d5a65ac6bf080584d8d1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 7 May 2015 16:11:04 -0700 Subject: [PATCH 109/501] Split layer and upload from repository Layer upload moved to its own file with its own unit tests Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/errors.go | 6 +- docs/client/layer.go | 178 +++++++++++++++++ docs/client/layer_upload.go | 164 ++++++++++++++++ docs/client/layer_upload_test.go | 223 +++++++++++++++++++++ docs/client/repository.go | 326 +------------------------------ 5 files changed, 569 insertions(+), 328 deletions(-) create mode 100644 docs/client/layer.go create mode 100644 docs/client/layer_upload.go create mode 100644 docs/client/layer_upload_test.go diff --git a/docs/client/errors.go b/docs/client/errors.go index 4ef2cc23..e02b0f73 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -1,7 +1,6 @@ package client import ( - "bytes" "encoding/json" "fmt" "io/ioutil" @@ -104,9 +103,8 @@ func parseHTTPErrorResponse(response *http.Response) error { if err != nil { return err } - decoder := json.NewDecoder(bytes.NewReader(body)) - err = decoder.Decode(&errors) - if err != nil { + + if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ ParseErr: err, Response: body, diff --git a/docs/client/layer.go b/docs/client/layer.go new file mode 100644 index 00000000..f61a9034 --- /dev/null +++ b/docs/client/layer.go @@ -0,0 +1,178 @@ +package client + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +type httpLayer struct { + *layers + + size int64 + digest digest.Digest + createdAt time.Time + + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 + err error +} + +func (hl *httpLayer) CreatedAt() time.Time { + return hl.createdAt +} + +func (hl *httpLayer) Digest() digest.Digest { + return hl.digest +} + +func (hl *httpLayer) Read(p []byte) (n int, err error) { + if hl.err != nil { + return 0, hl.err + } + + rd, err := hl.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hl.offset += int64(n) + + // Simulate io.EOR error if we reach filesize. 
+ if err == nil && hl.offset >= hl.size { + err = io.EOF + } + + return n, err +} + +func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { + if hl.err != nil { + return 0, hl.err + } + + var err error + newOffset := hl.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = hl.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = fmt.Errorf("cannot seek to negative position") + } else { + if hl.offset != newOffset { + hl.reset() + } + + // No problems, set the offset. + hl.offset = newOffset + } + + return hl.offset, err +} + +func (hl *httpLayer) Close() error { + if hl.err != nil { + return hl.err + } + + // close and release reader chain + if hl.rc != nil { + hl.rc.Close() + } + + hl.rc = nil + hl.brd = nil + + hl.err = fmt.Errorf("httpLayer: closed") + + return nil +} + +func (hl *httpLayer) reset() { + if hl.err != nil { + return + } + if hl.rc != nil { + hl.rc.Close() + hl.rc = nil + } +} + +func (hl *httpLayer) reader() (io.Reader, error) { + if hl.err != nil { + return nil, hl.err + } + + if hl.rc != nil { + return hl.brd, nil + } + + // If the offset is great than or equal to size, return a empty, noop reader. + if hl.offset >= hl.size { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + + blobURL, err := hl.ub.BuildBlobURL(hl.name, hl.digest) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", blobURL, nil) + if err != nil { + return nil, err + } + + if hl.offset > 0 { + // TODO(stevvooe): Get this working correctly. + + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("1-")) + context.GetLogger(hl.context).Infof("Range: %s", req.Header.Get("Range")) + } + + resp, err := hl.client.Do(req) + if err != nil { + return nil, err + } + + switch { + case resp.StatusCode == 200: + hl.rc = resp.Body + default: + defer resp.Body.Close() + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + if hl.brd == nil { + hl.brd = bufio.NewReader(hl.rc) + } else { + hl.brd.Reset(hl.rc) + } + + return hl.brd, nil +} + +func (hl *httpLayer) Length() int64 { + return hl.size +} + +func (hl *httpLayer) Handler(r *http.Request) (http.Handler, error) { + panic("Not implemented") +} diff --git a/docs/client/layer_upload.go b/docs/client/layer_upload.go new file mode 100644 index 00000000..ce0794c2 --- /dev/null +++ b/docs/client/layer_upload.go @@ -0,0 +1,164 @@ +package client + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" +) + +type httpLayerUpload struct { + repo distribution.Repository + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. 
+ offset int64 + closed bool +} + +func (hlu *httpLayerUpload) handleErrorResponse(resp *http.Response) error { + switch { + case resp.StatusCode == http.StatusNotFound: + return &BlobUploadNotFoundError{Location: hlu.location} + case resp.StatusCode >= 400 && resp.StatusCode < 500: + return parseHTTPErrorResponse(resp) + default: + return &UnexpectedHTTPStatusError{Status: resp.Status} + } +} + +func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hlu.location, r) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hlu.client.Do(req) + if err != nil { + return 0, err + } + + if resp.StatusCode != http.StatusAccepted { + return 0, hlu.handleErrorResponse(resp) + } + + // TODO(dmcgowan): Validate headers + hlu.uuid = resp.Header.Get("Docker-Upload-UUID") + hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hlu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hlu.offset, hlu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hlu.client.Do(req) + if err != nil { + return 0, err + } + + if resp.StatusCode != http.StatusAccepted { + return 0, hlu.handleErrorResponse(resp) + } + + // TODO(dmcgowan): Validate headers + hlu.uuid = resp.Header.Get("Docker-Upload-UUID") + hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { + newOffset := hlu.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + return newOffset, errors.New("Cannot seek from end on incomplete upload") + case os.SEEK_SET: + newOffset = int64(offset) + } + + hlu.offset = newOffset + + return hlu.offset, nil +} + +func (hlu *httpLayerUpload) UUID() string { + return hlu.uuid +} + +func (hlu *httpLayerUpload) StartedAt() time.Time { + return hlu.startedAt +} + +func (hlu *httpLayerUpload) Finish(digest digest.Digest) (distribution.Layer, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hlu.location, nil) + if err != nil { + return nil, err + } + + values := req.URL.Query() + values.Set("digest", digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hlu.client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + return nil, hlu.handleErrorResponse(resp) + } + + return hlu.repo.Layers().Fetch(digest) +} + +func (hlu *httpLayerUpload) Cancel() error { + panic("not implemented") +} + +func (hlu *httpLayerUpload) Close() error { + hlu.closed = true + return nil +} 
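Both ReadFrom and Write above compute their return value from the Range header the registry echoes back, rather than from the number of bytes actually sent over the wire. The standalone sketch below isolates that arithmetic; parseUploadRange is a hypothetical helper name, not part of the patch, and it assumes the inclusive "start-end" format the handlers above expect:

package main

import "fmt"

// parseUploadRange mirrors the arithmetic in ReadFrom and Write: the
// registry acknowledges an inclusive byte range "start-end", and the
// number of bytes accepted is end-start+1.
func parseUploadRange(rng string) (int64, error) {
	var start, end int64
	n, err := fmt.Sscanf(rng, "%d-%d", &start, &end)
	if err != nil {
		return 0, err
	}
	if n != 2 || end < start {
		return 0, fmt.Errorf("bad range format: %s", rng)
	}
	return end - start + 1, nil
}

func main() {
	// A 64-byte chunk acknowledged by the server as "0-63".
	n, err := parseUploadRange("0-63")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // prints 64
}

Note in passing that ReadFrom scans into int64 variables while Write uses plain int; fmt.Sscanf accepts either pointer type, so both compile, they just feed differently typed returns.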
diff --git a/docs/client/layer_upload_test.go b/docs/client/layer_upload_test.go new file mode 100644 index 00000000..1aa5cf1e --- /dev/null +++ b/docs/client/layer_upload_test.go @@ -0,0 +1,223 @@ +package client + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/testutil" +) + +// Test implements distribution.LayerUpload +var _ distribution.LayerUpload = &httpLayerUpload{} + +func TestUploadReadFrom(t *testing.T) { + _, b := newRandomBlob(64) + repo := "test/upload/readfrom" + locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + // Test Valid case + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {"0-63"}, + }), + }, + }, + // Test invalid range + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {locationPath}, + "Range": {""}, + }), + }, + }, + // Test 404 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusNotFound, + }, + }, + // Test 400 valid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte(` + { + "errors": [ + { + "code": "BLOB_UPLOAD_INVALID", + "message": "invalid upload identifier", + "detail": "more detail" + } + ] + }`), + }, + }, + // Test 400 invalid json + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusBadRequest, + Body: []byte("something bad happened"), + }, + }, + // Test 500 + { + Request: testutil.Request{ + Method: "PATCH", + Route: locationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusInternalServerError, + }, + }, + }) + + e, c := testServer(m) + defer c() + + client, err := e.HTTPClient(repo) + if err != nil { + t.Fatalf("Error creating client: %s", err) + } + layerUpload := &httpLayerUpload{ + client: client, + } + + // Valid case + layerUpload.location = e.Endpoint + locationPath + n, err := layerUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + if n != 64 { + t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) + } + + // Bad range + layerUpload.location = e.Endpoint + locationPath + _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when bad range received") + } + + // 404 + layerUpload.location = e.Endpoint + locationPath + _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if blobErr, ok := 
err.(*BlobUploadNotFoundError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if expected := e.Endpoint + locationPath; blobErr.Location != expected { + t.Fatalf("Unexpected location: %s, expected %s", blobErr.Location, expected) + } + + // 400 valid json + layerUpload.location = e.Endpoint + locationPath + _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*v2.Errors); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if len(uploadErr.Errors) != 1 { + t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr.Errors)) + } else { + v2Err := uploadErr.Errors[0] + if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { + t.Fatalf("Unexpected error code: %s, expected %s", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid.String()) + } + if expected := "invalid upload identifier"; v2Err.Message != expected { + t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Message, expected) + } + if expected := "more detail"; v2Err.Detail.(string) != expected { + t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Detail.(string), expected) + } + } + + // 400 invalid json + layerUpload.location = e.Endpoint + locationPath + _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else { + respStr := string(uploadErr.Response) + if expected := "something bad happened"; respStr != expected { + t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) + } + } + + // 500 + layerUpload.location = e.Endpoint + locationPath + _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + if err == nil { + t.Fatalf("Expected error when not found") + } + if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { + t.Fatalf("Wrong error type %T: %s", err, err) + } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { + t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) + } +} + +//repo distribution.Repository +//client *http.Client + +//uuid string +//startedAt time.Time + +//location string // always the last value of the location header. 
+//offset int64 +//closed bool diff --git a/docs/client/repository.go b/docs/client/repository.go index 578c3fca..22a02373 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -1,21 +1,14 @@ package client import ( - "bufio" "bytes" "encoding/json" - "errors" "fmt" - "io" - "io/ioutil" "net/http" "net/url" - "os" "strconv" "time" - ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest" "github.com/docker/distribution/digest" @@ -276,7 +269,8 @@ func (ls *layers) Upload() (distribution.LayerUpload, error) { } return &httpLayerUpload{ - layers: ls, + repo: ls.repository, + client: ls.client, uuid: uuid, startedAt: time.Now(), location: location, @@ -339,319 +333,3 @@ func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) { return nil, &UnexpectedHTTPStatusError{Status: resp.Status} } } - -type httpLayer struct { - *layers - - size int64 - digest digest.Digest - createdAt time.Time - - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 - err error -} - -func (hl *httpLayer) CreatedAt() time.Time { - return hl.createdAt -} - -func (hl *httpLayer) Digest() digest.Digest { - return hl.digest -} - -func (hl *httpLayer) Read(p []byte) (n int, err error) { - if hl.err != nil { - return 0, hl.err - } - - rd, err := hl.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hl.offset += int64(n) - - // Simulate io.EOR error if we reach filesize. - if err == nil && hl.offset >= hl.size { - err = io.EOF - } - - return n, err -} - -func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { - if hl.err != nil { - return 0, hl.err - } - - var err error - newOffset := hl.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = hl.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - if hl.offset != newOffset { - hl.reset() - } - - // No problems, set the offset. - hl.offset = newOffset - } - - return hl.offset, err -} - -func (hl *httpLayer) Close() error { - if hl.err != nil { - return hl.err - } - - // close and release reader chain - if hl.rc != nil { - hl.rc.Close() - } - - hl.rc = nil - hl.brd = nil - - hl.err = fmt.Errorf("httpLayer: closed") - - return nil -} - -func (hl *httpLayer) reset() { - if hl.err != nil { - return - } - if hl.rc != nil { - hl.rc.Close() - hl.rc = nil - } -} - -func (hl *httpLayer) reader() (io.Reader, error) { - if hl.err != nil { - return nil, hl.err - } - - if hl.rc != nil { - return hl.brd, nil - } - - // If the offset is great than or equal to size, return a empty, noop reader. - if hl.offset >= hl.size { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - - blobURL, err := hl.ub.BuildBlobURL(hl.name, hl.digest) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", blobURL, nil) - if err != nil { - return nil, err - } - - if hl.offset > 0 { - // TODO(stevvooe): Get this working correctly. - - // If we are at different offset, issue a range request from there. 
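// For reference, RFC 7233 byte-range requests take the form
// "Range: bytes=<first>-[<last>]", so a resumed read would send
// something like fmt.Sprintf("bytes=%d-", hl.offset); the hard-coded
// "1-" below never incorporates the current offset, which is what the
// TODO above refers to.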
- req.Header.Add("Range", fmt.Sprintf("1-")) - ctxu.GetLogger(hl.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hl.client.Do(req) - if err != nil { - return nil, err - } - - switch { - case resp.StatusCode == 200: - hl.rc = resp.Body - default: - defer resp.Body.Close() - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - if hl.brd == nil { - hl.brd = bufio.NewReader(hl.rc) - } else { - hl.brd.Reset(hl.rc) - } - - return hl.brd, nil -} - -func (hl *httpLayer) Length() int64 { - return hl.size -} - -func (hl *httpLayer) Handler(r *http.Request) (http.Handler, error) { - panic("Not implemented") -} - -type httpLayerUpload struct { - *layers - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -var _ distribution.LayerUpload = &httpLayerUpload{} - -func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hlu.location, r) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hlu.client.Do(req) - if err != nil { - return 0, err - } - - switch { - case resp.StatusCode == http.StatusAccepted: - // TODO(dmcgowan): Validate headers - hlu.uuid = resp.Header.Get("Docker-Upload-UUID") - hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - case resp.StatusCode == http.StatusNotFound: - return 0, &BlobUploadNotFoundError{Location: hlu.location} - case resp.StatusCode >= 400 && resp.StatusCode < 500: - return 0, parseHTTPErrorResponse(resp) - default: - return 0, &UnexpectedHTTPStatusError{Status: resp.Status} - } -} - -func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hlu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hlu.offset, hlu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hlu.client.Do(req) - if err != nil { - return 0, err - } - - switch { - case resp.StatusCode == http.StatusAccepted: - // TODO(dmcgowan): Validate headers - hlu.uuid = resp.Header.Get("Docker-Upload-UUID") - hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - case resp.StatusCode == http.StatusNotFound: - return 0, &BlobUploadNotFoundError{Location: hlu.location} - case resp.StatusCode >= 400 && resp.StatusCode < 500: - return 0, parseHTTPErrorResponse(resp) - default: - return 0, &UnexpectedHTTPStatusError{Status: resp.Status} - } -} - -func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { - newOffset := hlu.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - return newOffset, errors.New("Cannot seek from end on incomplete upload") - 
case os.SEEK_SET: - newOffset = int64(offset) - } - - hlu.offset = newOffset - - return hlu.offset, nil -} - -func (hlu *httpLayerUpload) UUID() string { - return hlu.uuid -} - -func (hlu *httpLayerUpload) StartedAt() time.Time { - return hlu.startedAt -} - -func (hlu *httpLayerUpload) Finish(digest digest.Digest) (distribution.Layer, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hlu.location, nil) - if err != nil { - return nil, err - } - - values := req.URL.Query() - values.Set("digest", digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hlu.client.Do(req) - if err != nil { - return nil, err - } - - switch { - case resp.StatusCode == http.StatusCreated: - return hlu.Layers().Fetch(digest) - case resp.StatusCode == http.StatusNotFound: - return nil, &BlobUploadNotFoundError{Location: hlu.location} - case resp.StatusCode >= 400 && resp.StatusCode < 500: - return nil, parseHTTPErrorResponse(resp) - default: - return nil, &UnexpectedHTTPStatusError{Status: resp.Status} - } -} - -func (hlu *httpLayerUpload) Cancel() error { - panic("not implemented") -} - -func (hlu *httpLayerUpload) Close() error { - hlu.closed = true - return nil -} From b78727cbf91ffddfc4af042a7d9039dbe70ce9f1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 8 May 2015 16:29:23 -0700 Subject: [PATCH 110/501] Cleanup session and config interface Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/authchallenge.go | 10 +- docs/client/endpoint.go | 268 ----------------- docs/client/layer_upload_test.go | 17 +- docs/client/repository.go | 8 +- docs/client/repository_test.go | 21 +- docs/client/session.go | 282 ++++++++++++++++++ .../{endpoint_test.go => session_test.go} | 63 ++-- docs/client/token.go | 78 ----- docs/client/transport.go | 120 ++++++++ 9 files changed, 475 insertions(+), 392 deletions(-) delete mode 100644 docs/client/endpoint.go create mode 100644 docs/client/session.go rename docs/client/{endpoint_test.go => session_test.go} (79%) delete mode 100644 docs/client/token.go create mode 100644 docs/client/transport.go diff --git a/docs/client/authchallenge.go b/docs/client/authchallenge.go index f45704b1..a9cce3cc 100644 --- a/docs/client/authchallenge.go +++ b/docs/client/authchallenge.go @@ -8,9 +8,9 @@ import ( // Octet types from RFC 2616. type octetType byte -// AuthorizationChallenge carries information +// authorizationChallenge carries information // from a WWW-Authenticate response header. 
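// Concretely, a 401 response carrying
//
//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
//
// parses into Scheme "bearer" with Parameters mapping "realm" and
// "service" to those values, as exercised by TestAuthChallengeParse
// further down in this series.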
-type AuthorizationChallenge struct { +type authorizationChallenge struct { Scheme string Parameters map[string]string } @@ -54,12 +54,12 @@ func init() { } } -func parseAuthHeader(header http.Header) []AuthorizationChallenge { - var challenges []AuthorizationChallenge +func parseAuthHeader(header http.Header) []authorizationChallenge { + var challenges []authorizationChallenge for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { - challenges = append(challenges, AuthorizationChallenge{Scheme: v, Parameters: p}) + challenges = append(challenges, authorizationChallenge{Scheme: v, Parameters: p}) } } return challenges diff --git a/docs/client/endpoint.go b/docs/client/endpoint.go deleted file mode 100644 index 9889dc66..00000000 --- a/docs/client/endpoint.go +++ /dev/null @@ -1,268 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/v2" -) - -// Authorizer is used to apply Authorization to an HTTP request -type Authorizer interface { - // Authorizer updates an HTTP request with the needed authorization - Authorize(req *http.Request) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) -} - -// RepositoryEndpoint represents a single host endpoint serving up -// the distribution API. -type RepositoryEndpoint struct { - Endpoint string - Mirror bool - - Header http.Header - Credentials CredentialStore - - ub *v2.URLBuilder -} - -type nullAuthorizer struct{} - -func (na nullAuthorizer) Authorize(req *http.Request) error { - return nil -} - -type repositoryTransport struct { - Transport http.RoundTripper - Header http.Header - Authorizer Authorizer -} - -func (rt *repositoryTransport) RoundTrip(req *http.Request) (*http.Response, error) { - reqCopy := new(http.Request) - *reqCopy = *req - - // Copy existing headers then static headers - reqCopy.Header = make(http.Header, len(req.Header)+len(rt.Header)) - for k, s := range req.Header { - reqCopy.Header[k] = append([]string(nil), s...) - } - for k, s := range rt.Header { - reqCopy.Header[k] = append(reqCopy.Header[k], s...) - } - - if rt.Authorizer != nil { - if err := rt.Authorizer.Authorize(reqCopy); err != nil { - return nil, err - } - } - - logrus.Debugf("HTTP: %s %s", req.Method, req.URL) - - if rt.Transport != nil { - return rt.Transport.RoundTrip(reqCopy) - } - return http.DefaultTransport.RoundTrip(reqCopy) -} - -type authTransport struct { - Transport http.RoundTripper - Header http.Header -} - -func (rt *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { - reqCopy := new(http.Request) - *reqCopy = *req - - // Copy existing headers then static headers - reqCopy.Header = make(http.Header, len(req.Header)+len(rt.Header)) - for k, s := range req.Header { - reqCopy.Header[k] = append([]string(nil), s...) - } - for k, s := range rt.Header { - reqCopy.Header[k] = append(reqCopy.Header[k], s...) 
- } - - logrus.Debugf("HTTP: %s %s", req.Method, req.URL) - - if rt.Transport != nil { - return rt.Transport.RoundTrip(reqCopy) - } - return http.DefaultTransport.RoundTrip(reqCopy) -} - -// URLBuilder returns a new URL builder -func (e *RepositoryEndpoint) URLBuilder() (*v2.URLBuilder, error) { - if e.ub == nil { - var err error - e.ub, err = v2.NewURLBuilderFromString(e.Endpoint) - if err != nil { - return nil, err - } - } - - return e.ub, nil -} - -// HTTPClient returns a new HTTP client configured for this endpoint -func (e *RepositoryEndpoint) HTTPClient(name string) (*http.Client, error) { - // TODO(dmcgowan): create http.Transport - - transport := &repositoryTransport{ - Header: e.Header, - } - client := &http.Client{ - Transport: transport, - } - - challenges, err := e.ping(client) - if err != nil { - return nil, err - } - actions := []string{"pull"} - if !e.Mirror { - actions = append(actions, "push") - } - - transport.Authorizer = &endpointAuthorizer{ - client: &http.Client{Transport: &authTransport{Header: e.Header}}, - challenges: challenges, - creds: e.Credentials, - resource: "repository", - scope: name, - actions: actions, - } - - return client, nil -} - -func (e *RepositoryEndpoint) ping(client *http.Client) ([]AuthorizationChallenge, error) { - ub, err := e.URLBuilder() - if err != nil { - return nil, err - } - u, err := ub.BuildBaseURL() - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - req.Header = make(http.Header, len(e.Header)) - for k, s := range e.Header { - req.Header[k] = append([]string(nil), s...) - } - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var supportsV2 bool -HeaderLoop: - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { - for _, versionName := range strings.Fields(supportedVersions) { - if versionName == "registry/2.0" { - supportsV2 = true - break HeaderLoop - } - } - } - - if !supportsV2 { - return nil, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e.Endpoint) - } - - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. 
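// In other words, a 401 from the base endpoint is not a failure here:
// it advertises which authentication schemes the registry accepts,
// while a plain 200 means anonymous access is permitted and no
// challenges are returned.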
- return parseAuthHeader(resp.Header), nil - } else if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unable to get valid ping response: %d", resp.StatusCode) - } - - return nil, nil -} - -type endpointAuthorizer struct { - client *http.Client - challenges []AuthorizationChallenge - creds CredentialStore - - resource string - scope string - actions []string - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time -} - -func (ta *endpointAuthorizer) Authorize(req *http.Request) error { - token, err := ta.getToken() - if err != nil { - return err - } - if token != "" { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - } else if ta.creds != nil { - username, password := ta.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - } - } - return nil -} - -func (ta *endpointAuthorizer) getToken() (string, error) { - ta.tokenLock.Lock() - defer ta.tokenLock.Unlock() - now := time.Now() - if now.Before(ta.tokenExpiration) { - //log.Debugf("Using cached token for %q", ta.auth.Username) - return ta.tokenCache, nil - } - - for _, challenge := range ta.challenges { - switch strings.ToLower(challenge.Scheme) { - case "basic": - // no token necessary - case "bearer": - //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) - params := map[string]string{} - for k, v := range challenge.Parameters { - params[k] = v - } - params["scope"] = fmt.Sprintf("%s:%s:%s", ta.resource, ta.scope, strings.Join(ta.actions, ",")) - token, err := getToken(ta.creds, params, ta.client) - if err != nil { - return "", err - } - ta.tokenCache = token - ta.tokenExpiration = now.Add(time.Minute) - - return token, nil - default: - //log.Infof("Unsupported auth scheme: %q", challenge.Scheme) - } - } - - // Do not expire cache since there are no challenges which use a token - ta.tokenExpiration = time.Now().Add(time.Hour * 24) - - return "", nil -} diff --git a/docs/client/layer_upload_test.go b/docs/client/layer_upload_test.go index 1aa5cf1e..9e22cb7c 100644 --- a/docs/client/layer_upload_test.go +++ b/docs/client/layer_upload_test.go @@ -124,7 +124,8 @@ func TestUploadReadFrom(t *testing.T) { e, c := testServer(m) defer c() - client, err := e.HTTPClient(repo) + repoConfig := &RepositoryConfig{} + client, err := repoConfig.HTTPClient() if err != nil { t.Fatalf("Error creating client: %s", err) } @@ -133,7 +134,7 @@ func TestUploadReadFrom(t *testing.T) { } // Valid case - layerUpload.location = e.Endpoint + locationPath + layerUpload.location = e + locationPath n, err := layerUpload.ReadFrom(bytes.NewReader(b)) if err != nil { t.Fatalf("Error calling ReadFrom: %s", err) @@ -143,26 +144,26 @@ func TestUploadReadFrom(t *testing.T) { } // Bad range - layerUpload.location = e.Endpoint + locationPath + layerUpload.location = e + locationPath _, err = layerUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when bad range received") } // 404 - layerUpload.location = e.Endpoint + locationPath + layerUpload.location = e + locationPath _, err = layerUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } if blobErr, ok := err.(*BlobUploadNotFoundError); !ok { t.Fatalf("Wrong error type %T: %s", err, err) - } else if expected := e.Endpoint + locationPath; blobErr.Location != expected { + } else if expected := e + locationPath; blobErr.Location != expected { t.Fatalf("Unexpected location: %s, expected %s", blobErr.Location, expected) } // 400 valid 
json - layerUpload.location = e.Endpoint + locationPath + layerUpload.location = e + locationPath _, err = layerUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") @@ -185,7 +186,7 @@ func TestUploadReadFrom(t *testing.T) { } // 400 invalid json - layerUpload.location = e.Endpoint + locationPath + layerUpload.location = e + locationPath _, err = layerUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") @@ -200,7 +201,7 @@ func TestUploadReadFrom(t *testing.T) { } // 500 - layerUpload.location = e.Endpoint + locationPath + layerUpload.location = e + locationPath _, err = layerUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") diff --git a/docs/client/repository.go b/docs/client/repository.go index 22a02373..d5f75bda 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -19,17 +19,17 @@ import ( ) // NewRepository creates a new Repository for the given repository name and endpoint -func NewRepository(ctx context.Context, name string, endpoint *RepositoryEndpoint) (distribution.Repository, error) { +func NewRepository(ctx context.Context, name, endpoint string, repoConfig *RepositoryConfig) (distribution.Repository, error) { if err := v2.ValidateRespositoryName(name); err != nil { return nil, err } - ub, err := endpoint.URLBuilder() + ub, err := v2.NewURLBuilderFromString(endpoint) if err != nil { return nil, err } - client, err := endpoint.HTTPClient(name) + client, err := repoConfig.HTTPClient() if err != nil { return nil, err } @@ -39,7 +39,7 @@ func NewRepository(ctx context.Context, name string, endpoint *RepositoryEndpoin ub: ub, name: name, context: ctx, - mirror: endpoint.Mirror, + mirror: repoConfig.AllowMirrors, }, nil } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index b96c52e5..1674213d 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -20,11 +20,10 @@ import ( "golang.org/x/net/context" ) -func testServer(rrm testutil.RequestResponseMap) (*RepositoryEndpoint, func()) { +func testServer(rrm testutil.RequestResponseMap) (string, func()) { h := testutil.NewHandler(rrm) s := httptest.NewServer(h) - e := RepositoryEndpoint{Endpoint: s.URL, Mirror: false} - return &e, s.Close + return s.URL, s.Close } func newRandomBlob(size int) (digest.Digest, []byte) { @@ -97,7 +96,7 @@ func TestLayerFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), "test.example.com/repo1", e) + r, err := NewRepository(context.Background(), "test.example.com/repo1", e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -127,7 +126,7 @@ func TestLayerExists(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), "test.example.com/repo1", e) + r, err := NewRepository(context.Background(), "test.example.com/repo1", e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -227,7 +226,7 @@ func TestLayerUploadChunked(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -334,7 +333,7 @@ func TestLayerUploadMonolithic(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -475,7 +474,7 @@ func 
TestManifestFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -508,7 +507,7 @@ func TestManifestFetchByTag(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -553,7 +552,7 @@ func TestManifestDelete(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } @@ -591,7 +590,7 @@ func TestManifestPut(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e) + r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) if err != nil { t.Fatal(err) } diff --git a/docs/client/session.go b/docs/client/session.go new file mode 100644 index 00000000..bd8abe0f --- /dev/null +++ b/docs/client/session.go @@ -0,0 +1,282 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +// Authorizer is used to apply Authorization to an HTTP request +type Authorizer interface { + // Authorizer updates an HTTP request with the needed authorization + Authorize(req *http.Request) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) +} + +// RepositoryConfig holds the base configuration needed to communicate +// with a registry including a method of authorization and HTTP headers. +type RepositoryConfig struct { + Header http.Header + AuthSource Authorizer + AllowMirrors bool +} + +// HTTPClient returns a new HTTP client configured for this configuration +func (rc *RepositoryConfig) HTTPClient() (*http.Client, error) { + // TODO(dmcgowan): create base http.Transport with proper TLS configuration + + transport := &Transport{ + ExtraHeader: rc.Header, + AuthSource: rc.AuthSource, + } + + client := &http.Client{ + Transport: transport, + } + + return client, nil +} + +// TokenScope represents the scope at which a token will be requested. +// This represents a specific action on a registry resource. +type TokenScope struct { + Resource string + Scope string + Actions []string +} + +func (ts TokenScope) String() string { + return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +} + +// NewTokenAuthorizer returns an authorizer which is capable of getting a token +// from a token server. The expected authorization method will be discovered +// by the authorizer, getting the token server endpoint from the URL being +// requested. Basic authentication may either be done to the token source or +// directly with the requested endpoint depending on the endpoint's +// WWW-Authenticate header. 
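// Illustrative use, mirroring the session tests below (the variable
// names and the example scope are placeholders, not part of the patch):
//
//	scope := TokenScope{Resource: "repository", Scope: "test/repo", Actions: []string{"pull", "push"}}
//	config := &RepositoryConfig{AuthSource: NewTokenAuthorizer(creds, nil, scope)}
//	client, err := config.HTTPClient()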
+func NewTokenAuthorizer(creds CredentialStore, header http.Header, scope TokenScope) Authorizer { + return &tokenAuthorizer{ + header: header, + creds: creds, + scope: scope, + challenges: map[string][]authorizationChallenge{}, + } +} + +type tokenAuthorizer struct { + header http.Header + challenges map[string][]authorizationChallenge + creds CredentialStore + scope TokenScope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time +} + +func (ta *tokenAuthorizer) ping(endpoint string) ([]authorizationChallenge, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + resp, err := ta.client().Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var supportsV2 bool +HeaderLoop: + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { + for _, versionName := range strings.Fields(supportedVersions) { + if versionName == "registry/2.0" { + supportsV2 = true + break HeaderLoop + } + } + } + + if !supportsV2 { + return nil, fmt.Errorf("%s does not appear to be a v2 registry endpoint", endpoint) + } + + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. + return parseAuthHeader(resp.Header), nil + } else if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unable to get valid ping response: %d", resp.StatusCode) + } + + return nil, nil +} + +func (ta *tokenAuthorizer) Authorize(req *http.Request) error { + v2Root := strings.Index(req.URL.Path, "/v2/") + if v2Root == -1 { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: req.URL.Path[:v2Root+4], + } + + pingEndpoint := ping.String() + + challenges, ok := ta.challenges[pingEndpoint] + if !ok { + var err error + challenges, err = ta.ping(pingEndpoint) + if err != nil { + return err + } + ta.challenges[pingEndpoint] = challenges + } + + return ta.setAuth(challenges, req) +} + +func (ta *tokenAuthorizer) client() *http.Client { + // TODO(dmcgowan): Use same transport which has properly configured TLS + return &http.Client{Transport: &Transport{ExtraHeader: ta.header}} +} + +func (ta *tokenAuthorizer) setAuth(challenges []authorizationChallenge, req *http.Request) error { + var useBasic bool + for _, challenge := range challenges { + switch strings.ToLower(challenge.Scheme) { + case "basic": + useBasic = true + case "bearer": + if err := ta.refreshToken(challenge); err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ta.tokenCache)) + + return nil + default: + //log.Infof("Unsupported auth scheme: %q", challenge.Scheme) + } + } + + // Only use basic when no token auth challenges found + if useBasic { + if ta.creds != nil { + username, password := ta.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return errors.New("no basic auth credentials") + } + + return nil +} + +func (ta *tokenAuthorizer) refreshToken(challenge authorizationChallenge) error { + ta.tokenLock.Lock() + defer ta.tokenLock.Unlock() + now := time.Now() + if now.After(ta.tokenExpiration) { + token, err := ta.fetchToken(challenge) + if err != nil { + return err + } + ta.tokenCache = token + ta.tokenExpiration = now.Add(time.Minute) + } + + return nil +} + +type tokenResponse struct { + Token string `json:"token"` +} + +func (ta *tokenAuthorizer) fetchToken(challenge authorizationChallenge) 
(token string, err error) { + //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) + params := map[string]string{} + for k, v := range challenge.Parameters { + params[k] = v + } + params["scope"] = ta.scope.String() + + realm, ok := params["realm"] + if !ok { + return "", errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return "", fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + // TODO(dmcgowan): Handle empty scheme + + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return "", err + } + + reqParams := req.URL.Query() + service := params["service"] + scope := params["scope"] + + if service != "" { + reqParams.Add("service", service) + } + + for _, scopeField := range strings.Fields(scope) { + reqParams.Add("scope", scopeField) + } + + if ta.creds != nil { + username, password := ta.creds.Basic(realmURL) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := ta.client().Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + decoder := json.NewDecoder(resp.Body) + + tr := new(tokenResponse) + if err = decoder.Decode(tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.Token == "" { + return "", errors.New("authorization server did not include a token in the response") + } + + return tr.Token, nil +} diff --git a/docs/client/endpoint_test.go b/docs/client/session_test.go similarity index 79% rename from docs/client/endpoint_test.go rename to docs/client/session_test.go index 42bdc357..87e1e66e 100644 --- a/docs/client/endpoint_test.go +++ b/docs/client/session_test.go @@ -30,7 +30,7 @@ func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Re w.next.ServeHTTP(rw, r) } -func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (*RepositoryEndpoint, func()) { +func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { h := testutil.NewHandler(rrm) wrapper := &testAuthenticationWrapper{ @@ -43,8 +43,7 @@ func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, au } s := httptest.NewServer(wrapper) - e := RepositoryEndpoint{Endpoint: s.URL, Mirror: false} - return &e, s.Close + return s.URL, s.Close } type testCredentialStore struct { @@ -62,6 +61,16 @@ func TestEndpointAuthorizeToken(t *testing.T) { repo2 := "other/registry" scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + tokenScope1 := TokenScope{ + Resource: "repository", + Scope: repo1, + Actions: []string{"pull", "push"}, + } + tokenScope2 := TokenScope{ + Resource: "repository", + Scope: repo2, + Actions: []string{"pull", "push"}, + } tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { @@ -92,7 +101,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { { Request: testutil.Request{ Method: "GET", - Route: "/hello", + Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -100,19 +109,23 @@ func 
TestEndpointAuthorizeToken(t *testing.T) { }, }) - authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te.Endpoint+"/token", service) + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) validCheck := func(a string) bool { return a == "Bearer statictoken" } e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - client, err := e.HTTPClient(repo1) + repo1Config := &RepositoryConfig{ + AuthSource: NewTokenAuthorizer(nil, nil, tokenScope1), + } + + client, err := repo1Config.HTTPClient() if err != nil { t.Fatalf("Error creating http client: %s", err) } - req, _ := http.NewRequest("GET", e.Endpoint+"/hello", nil) + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) @@ -128,12 +141,15 @@ func TestEndpointAuthorizeToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, badCheck) defer c2() - client2, err := e2.HTTPClient(repo2) + repo2Config := &RepositoryConfig{ + AuthSource: NewTokenAuthorizer(nil, nil, tokenScope2), + } + client2, err := repo2Config.HTTPClient() if err != nil { t.Fatalf("Error creating http client: %s", err) } - req, _ = http.NewRequest("GET", e.Endpoint+"/hello", nil) + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) resp, err = client2.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) @@ -155,6 +171,11 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { scope := fmt.Sprintf("repository:%s:pull,push", repo) username := "tokenuser" password := "superSecretPa$$word" + tokenScope := TokenScope{ + Resource: "repository", + Scope: repo, + Actions: []string{"pull", "push"}, + } tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { @@ -180,7 +201,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { { Request: testutil.Request{ Method: "GET", - Route: "/hello", + Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -188,24 +209,27 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { }, }) - authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te.Endpoint+"/token", service) + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) bearerCheck := func(a string) bool { return a == "Bearer statictoken" } e, c := testServerWithAuth(m, authenicate2, bearerCheck) defer c() - e.Credentials = &testCredentialStore{ + creds := &testCredentialStore{ username: username, password: password, } + repoConfig := &RepositoryConfig{ + AuthSource: NewTokenAuthorizer(creds, nil, tokenScope), + } - client, err := e.HTTPClient(repo) + client, err := repoConfig.HTTPClient() if err != nil { t.Fatalf("Error creating http client: %s", err) } - req, _ := http.NewRequest("GET", e.Endpoint+"/hello", nil) + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) @@ -221,7 +245,7 @@ func TestEndpointAuthorizeBasic(t *testing.T) { { Request: testutil.Request{ Method: "GET", - Route: "/hello", + Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -237,17 +261,20 @@ func TestEndpointAuthorizeBasic(t *testing.T) { } e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - e.Credentials = &testCredentialStore{ + creds := &testCredentialStore{ username: username, password: password, } + repoConfig := &RepositoryConfig{ + AuthSource: NewTokenAuthorizer(creds, nil, TokenScope{}), + } - client, err := 
e.HTTPClient("test/repo/basic") + client, err := repoConfig.HTTPClient() if err != nil { t.Fatalf("Error creating http client: %s", err) } - req, _ := http.NewRequest("GET", e.Endpoint+"/hello", nil) + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) diff --git a/docs/client/token.go b/docs/client/token.go deleted file mode 100644 index 6439e01e..00000000 --- a/docs/client/token.go +++ /dev/null @@ -1,78 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -type tokenResponse struct { - Token string `json:"token"` -} - -func getToken(creds CredentialStore, params map[string]string, client *http.Client) (token string, err error) { - realm, ok := params["realm"] - if !ok { - return "", errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return "", fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - // TODO(dmcgowan): Handle empty scheme - - req, err := http.NewRequest("GET", realmURL.String(), nil) - if err != nil { - return "", err - } - - reqParams := req.URL.Query() - service := params["service"] - scope := params["scope"] - - if service != "" { - reqParams.Add("service", service) - } - - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) - } - - if creds != nil { - username, password := creds.Basic(realmURL) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - decoder := json.NewDecoder(resp.Body) - - tr := new(tokenResponse) - if err = decoder.Decode(tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.Token == "" { - return "", errors.New("authorization server did not include a token in the response") - } - - return tr.Token, nil -} diff --git a/docs/client/transport.go b/docs/client/transport.go new file mode 100644 index 00000000..e92ba543 --- /dev/null +++ b/docs/client/transport.go @@ -0,0 +1,120 @@ +package client + +import ( + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes registry HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// from an Auth source +type Transport struct { + AuthSource Authorizer + ExtraHeader http.Header + + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. 
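// A Transport drops in wherever an http.RoundTripper is accepted;
// RepositoryConfig.HTTPClient above builds exactly this shape:
//
//	client := &http.Client{Transport: &Transport{
//		AuthSource:  authorizer,
//		ExtraHeader: headers,
//	}}
//
// where authorizer and headers stand in for whatever the caller
// configured.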
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := t.cloneRequest(req) + if t.AuthSource != nil { + if err := t.AuthSource.Authorize(req2); err != nil { + return nil, err + } + } + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func (t *Transport) cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + for k, s := range t.ExtraHeader { + r2.Header[k] = append(r2.Header[k], s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} From 2eb9b286ed575fa2165b38a971da7fcf4003510f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 8 May 2015 16:33:27 -0700 Subject: [PATCH 111/501] Use distribution context instead of google Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/repository.go | 2 +- docs/client/repository_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index d5f75bda..0cda0d83 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -14,8 +14,8 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/v2" - "golang.org/x/net/context" ) // NewRepository creates a new Repository for the given repository name and endpoint diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 1674213d..f53112dc 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -14,10 +14,10 @@ import ( "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/testutil" - "golang.org/x/net/context" ) func testServer(rrm testutil.RequestResponseMap) (string, func()) { From 7d630192dda132713aba1583d4937fb75951b785 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 8 May 2015 
17:40:30 -0700 Subject: [PATCH 112/501] Add tags implementation Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/errors.go | 7 +++++ docs/client/layer_upload.go | 8 ++--- docs/client/repository.go | 56 ++++++++++++++++++++++------------ docs/client/repository_test.go | 52 +++++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+), 25 deletions(-) diff --git a/docs/client/errors.go b/docs/client/errors.go index e02b0f73..adb909d1 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -112,3 +112,10 @@ func parseHTTPErrorResponse(response *http.Response) error { } return &errors } + +func handleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + return parseHTTPErrorResponse(resp) + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} diff --git a/docs/client/layer_upload.go b/docs/client/layer_upload.go index ce0794c2..02cc5162 100644 --- a/docs/client/layer_upload.go +++ b/docs/client/layer_upload.go @@ -26,14 +26,10 @@ type httpLayerUpload struct { } func (hlu *httpLayerUpload) handleErrorResponse(resp *http.Response) error { - switch { - case resp.StatusCode == http.StatusNotFound: + if resp.StatusCode == http.StatusNotFound { return &BlobUploadNotFoundError{Location: hlu.location} - case resp.StatusCode >= 400 && resp.StatusCode < 500: - return parseHTTPErrorResponse(resp) - default: - return &UnexpectedHTTPStatusError{Status: resp.Status} } + return handleErrorResponse(resp) } func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { diff --git a/docs/client/repository.go b/docs/client/repository.go index 0cda0d83..c79c306b 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io/ioutil" "net/http" "net/url" "strconv" @@ -90,7 +91,36 @@ type manifests struct { } func (ms *manifests) Tags() ([]string, error) { - panic("not implemented") + u, err := ms.ub.BuildTagsURL(ms.name) + if err != nil { + return nil, err + } + + resp, err := ms.client.Get(u) + if err != nil { + return nil, err + } + + switch { + case resp.StatusCode == http.StatusOK: + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return nil, err + } + + return tagsResponse.Tags, nil + case resp.StatusCode == http.StatusNotFound: + return nil, nil + default: + return nil, handleErrorResponse(resp) + } } func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { @@ -113,10 +143,8 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { return true, nil case resp.StatusCode == http.StatusNotFound: return false, nil - case resp.StatusCode >= 400 && resp.StatusCode < 500: - return false, parseHTTPErrorResponse(resp) default: - return false, &UnexpectedHTTPStatusError{Status: resp.Status} + return false, handleErrorResponse(resp) } } @@ -146,10 +174,8 @@ func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) { } return &sm, nil - case resp.StatusCode >= 400 && resp.StatusCode < 500: - return nil, parseHTTPErrorResponse(resp) default: - return nil, &UnexpectedHTTPStatusError{Status: resp.Status} + return nil, handleErrorResponse(resp) } } @@ -174,10 +200,8 @@ func (ms *manifests) Put(m *manifest.SignedManifest) error { case resp.StatusCode == http.StatusAccepted: // TODO(dmcgowan): Use or check digest header return nil - case resp.StatusCode >= 400 && resp.StatusCode < 500: 
-		return parseHTTPErrorResponse(resp)
 	default:
-		return &UnexpectedHTTPStatusError{Status: resp.Status}
+		return handleErrorResponse(resp)
 	}
 }
 
@@ -200,10 +224,8 @@ func (ms *manifests) Delete(dgst digest.Digest) error {
 	switch {
 	case resp.StatusCode == http.StatusOK:
 		return nil
-	case resp.StatusCode >= 400 && resp.StatusCode < 500:
-		return parseHTTPErrorResponse(resp)
 	default:
-		return &UnexpectedHTTPStatusError{Status: resp.Status}
+		return handleErrorResponse(resp)
 	}
 }
 
@@ -275,10 +297,8 @@ func (ls *layers) Upload() (distribution.LayerUpload, error) {
 		startedAt: time.Now(),
 		location:  location,
 	}, nil
-	case resp.StatusCode >= 400 && resp.StatusCode < 500:
-		return nil, parseHTTPErrorResponse(resp)
 	default:
-		return nil, &UnexpectedHTTPStatusError{Status: resp.Status}
+		return nil, handleErrorResponse(resp)
 	}
 }
 
@@ -327,9 +347,7 @@ func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) {
 			BlobSum: dgst,
 		},
 	}
-	case resp.StatusCode >= 400 && resp.StatusCode < 500:
-		return nil, parseHTTPErrorResponse(resp)
 	default:
-		return nil, &UnexpectedHTTPStatusError{Status: resp.Status}
+		return nil, handleErrorResponse(resp)
 	}
 }
diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go
index f53112dc..fe8ffeb7 100644
--- a/docs/client/repository_test.go
+++ b/docs/client/repository_test.go
@@ -9,6 +9,7 @@ import (
 	"log"
 	"net/http"
 	"net/http/httptest"
+	"strings"
 	"testing"
 	"time"
 
@@ -602,3 +603,54 @@ func TestManifestPut(t *testing.T) {
 
 	// TODO(dmcgowan): Check for error cases
 }
+
+func TestManifestTags(t *testing.T) {
+	repo := "test.example.com/repo/tags/list"
+	tagsList := []byte(strings.TrimSpace(`
+{
+	"name": "test.example.com/repo/tags/list",
+	"tags": [
+		"tag1",
+		"tag2",
+		"funtag"
+	]
+}
+	`))
+	var m testutil.RequestResponseMap
+	addPing(&m)
+	m = append(m, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "GET",
+			Route:  "/v2/" + repo + "/tags/list",
+		},
+		Response: testutil.Response{
+			StatusCode: http.StatusOK,
+			Body:       tagsList,
+			Headers: http.Header(map[string][]string{
+				"Content-Length": {fmt.Sprint(len(tagsList))},
+				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+			}),
+		},
+	})
+
+	e, c := testServer(m)
+	defer c()
+
+	r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ms := r.Manifests()
+	tags, err := ms.Tags()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(tags) != 3 {
+		t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags))
+	}
+	// TODO(dmcgowan): Check array
+
+	// TODO(dmcgowan): Check for error cases
+}

From ecaa643cb24288523eb2108f00c54fb7db7cfe7e Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Mon, 11 May 2015 11:31:22 -0700
Subject: [PATCH 113/501] Create authentication handler

Refactor authorizer to take a set of authentication handlers for
different authentication schemes returned by an unauthorized HTTP
request.
Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/authchallenge.go | 6 +- docs/client/authchallenge_test.go | 21 +-- docs/client/errors.go | 35 ----- docs/client/layer.go | 2 +- docs/client/session.go | 205 +++++++++++++++++++----------- 5 files changed, 143 insertions(+), 126 deletions(-) diff --git a/docs/client/authchallenge.go b/docs/client/authchallenge.go index a9cce3cc..49cf270e 100644 --- a/docs/client/authchallenge.go +++ b/docs/client/authchallenge.go @@ -54,12 +54,12 @@ func init() { } } -func parseAuthHeader(header http.Header) []authorizationChallenge { - var challenges []authorizationChallenge +func parseAuthHeader(header http.Header) map[string]authorizationChallenge { + challenges := map[string]authorizationChallenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { - challenges = append(challenges, authorizationChallenge{Scheme: v, Parameters: p}) + challenges[v] = authorizationChallenge{Scheme: v, Parameters: p} } } return challenges diff --git a/docs/client/authchallenge_test.go b/docs/client/authchallenge_test.go index bb3016ee..802c94f3 100644 --- a/docs/client/authchallenge_test.go +++ b/docs/client/authchallenge_test.go @@ -13,25 +13,26 @@ func TestAuthChallengeParse(t *testing.T) { if len(challenges) != 1 { t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) } + challenge := challenges["bearer"] - if expected := "bearer"; challenges[0].Scheme != expected { - t.Fatalf("Unexpected scheme: %s, expected: %s", challenges[0].Scheme, expected) + if expected := "bearer"; challenge.Scheme != expected { + t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) } - if expected := "https://auth.example.com/token"; challenges[0].Parameters["realm"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["realm"], expected) + if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) } - if expected := "registry.example.com"; challenges[0].Parameters["service"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["service"], expected) + if expected := "registry.example.com"; challenge.Parameters["service"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) } - if expected := "fun"; challenges[0].Parameters["other"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["other"], expected) + if expected := "fun"; challenge.Parameters["other"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) } - if expected := "he\"llo"; challenges[0].Parameters["slashed"] != expected { - t.Fatalf("Unexpected param: %s, expected: %s", challenges[0].Parameters["slashed"], expected) + if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { + t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) } } diff --git a/docs/client/errors.go b/docs/client/errors.go index adb909d1..2bb64a44 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -6,44 +6,9 @@ import ( "io/ioutil" "net/http" - "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/v2" ) -// RepositoryNotFoundError is returned when making an operation against a -// repository that does not exist in 
the registry. -type RepositoryNotFoundError struct { - Name string -} - -func (e *RepositoryNotFoundError) Error() string { - return fmt.Sprintf("No repository found with Name: %s", e.Name) -} - -// ImageManifestNotFoundError is returned when making an operation against a -// given image manifest that does not exist in the registry. -type ImageManifestNotFoundError struct { - Name string - Tag string -} - -func (e *ImageManifestNotFoundError) Error() string { - return fmt.Sprintf("No manifest found with Name: %s, Tag: %s", - e.Name, e.Tag) -} - -// BlobNotFoundError is returned when making an operation against a given image -// layer that does not exist in the registry. -type BlobNotFoundError struct { - Name string - Digest digest.Digest -} - -func (e *BlobNotFoundError) Error() string { - return fmt.Sprintf("No blob found with Name: %s, Digest: %s", - e.Name, e.Digest) -} - // BlobUploadNotFoundError is returned when making a blob upload operation against an // invalid blob upload location url. // This may be the result of using a cancelled, completed, or stale upload diff --git a/docs/client/layer.go b/docs/client/layer.go index f61a9034..b6e1697d 100644 --- a/docs/client/layer.go +++ b/docs/client/layer.go @@ -48,7 +48,7 @@ func (hl *httpLayer) Read(p []byte) (n int, err error) { n, err = rd.Read(p) hl.offset += int64(n) - // Simulate io.EOR error if we reach filesize. + // Simulate io.EOF error if we reach filesize. if err == nil && hl.offset >= hl.size { err = io.EOF } diff --git a/docs/client/session.go b/docs/client/session.go index bd8abe0f..97e932ff 100644 --- a/docs/client/session.go +++ b/docs/client/session.go @@ -17,6 +17,13 @@ type Authorizer interface { Authorize(req *http.Request) error } +// AuthenticationHandler is an interface for authorizing a request from +// params from a "WWW-Authenicate" header for a single scheme. +type AuthenticationHandler interface { + Scheme() string + AuthorizeRequest(req *http.Request, params map[string]string) error +} + // CredentialStore is an interface for getting credentials for // a given URL type CredentialStore interface { @@ -48,18 +55,6 @@ func (rc *RepositoryConfig) HTTPClient() (*http.Client, error) { return client, nil } -// TokenScope represents the scope at which a token will be requested. -// This represents a specific action on a registry resource. -type TokenScope struct { - Resource string - Scope string - Actions []string -} - -func (ts TokenScope) String() string { - return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) -} - // NewTokenAuthorizer returns an authorizer which is capable of getting a token // from a token server. The expected authorization method will be discovered // by the authorizer, getting the token server endpoint from the URL being @@ -69,24 +64,37 @@ func (ts TokenScope) String() string { func NewTokenAuthorizer(creds CredentialStore, header http.Header, scope TokenScope) Authorizer { return &tokenAuthorizer{ header: header, - creds: creds, - scope: scope, - challenges: map[string][]authorizationChallenge{}, + challenges: map[string]map[string]authorizationChallenge{}, + handlers: []AuthenticationHandler{ + NewTokenHandler(creds, scope, header), + NewBasicHandler(creds), + }, + } +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. 
+func NewAuthorizer(header http.Header, handlers ...AuthenticationHandler) Authorizer { + return &tokenAuthorizer{ + header: header, + challenges: map[string]map[string]authorizationChallenge{}, + handlers: handlers, } } type tokenAuthorizer struct { header http.Header - challenges map[string][]authorizationChallenge - creds CredentialStore - scope TokenScope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time + challenges map[string]map[string]authorizationChallenge + handlers []AuthenticationHandler } -func (ta *tokenAuthorizer) ping(endpoint string) ([]authorizationChallenge, error) { +func (ta *tokenAuthorizer) client() *http.Client { + // TODO(dmcgowan): Use same transport which has properly configured TLS + return &http.Client{Transport: &Transport{ExtraHeader: ta.header}} +} + +func (ta *tokenAuthorizer) ping(endpoint string) (map[string]authorizationChallenge, error) { req, err := http.NewRequest("GET", endpoint, nil) if err != nil { return nil, err @@ -98,6 +106,7 @@ func (ta *tokenAuthorizer) ping(endpoint string) ([]authorizationChallenge, erro } defer resp.Body.Close() + // TODO(dmcgowan): Add version string which would allow skipping this section var supportsV2 bool HeaderLoop: for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { @@ -148,59 +157,80 @@ func (ta *tokenAuthorizer) Authorize(req *http.Request) error { ta.challenges[pingEndpoint] = challenges } - return ta.setAuth(challenges, req) -} - -func (ta *tokenAuthorizer) client() *http.Client { - // TODO(dmcgowan): Use same transport which has properly configured TLS - return &http.Client{Transport: &Transport{ExtraHeader: ta.header}} -} - -func (ta *tokenAuthorizer) setAuth(challenges []authorizationChallenge, req *http.Request) error { - var useBasic bool - for _, challenge := range challenges { - switch strings.ToLower(challenge.Scheme) { - case "basic": - useBasic = true - case "bearer": - if err := ta.refreshToken(challenge); err != nil { + for _, handler := range ta.handlers { + challenge, ok := challenges[handler.Scheme()] + if ok { + if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { return err } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ta.tokenCache)) - - return nil - default: - //log.Infof("Unsupported auth scheme: %q", challenge.Scheme) } } - // Only use basic when no token auth challenges found - if useBasic { - if ta.creds != nil { - username, password := ta.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return errors.New("no basic auth credentials") - } - return nil } -func (ta *tokenAuthorizer) refreshToken(challenge authorizationChallenge) error { - ta.tokenLock.Lock() - defer ta.tokenLock.Unlock() +type tokenHandler struct { + header http.Header + creds CredentialStore + scope TokenScope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time +} + +// TokenScope represents the scope at which a token will be requested. +// This represents a specific action on a registry resource. +type TokenScope struct { + Resource string + Scope string + Actions []string +} + +// NewTokenHandler creates a new AuthenicationHandler which supports +// fetching tokens from a remote token server. 
+func NewTokenHandler(creds CredentialStore, scope TokenScope, header http.Header) AuthenticationHandler { + return &tokenHandler{ + header: header, + creds: creds, + scope: scope, + } +} + +func (ts TokenScope) String() string { + return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +} + +func (ts *tokenHandler) client() *http.Client { + // TODO(dmcgowan): Use same transport which has properly configured TLS + return &http.Client{Transport: &Transport{ExtraHeader: ts.header}} +} + +func (ts *tokenHandler) Scheme() string { + return "bearer" +} + +func (ts *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if err := ts.refreshToken(params); err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.tokenCache)) + + return nil +} + +func (ts *tokenHandler) refreshToken(params map[string]string) error { + ts.tokenLock.Lock() + defer ts.tokenLock.Unlock() now := time.Now() - if now.After(ta.tokenExpiration) { - token, err := ta.fetchToken(challenge) + if now.After(ts.tokenExpiration) { + token, err := ts.fetchToken(params) if err != nil { return err } - ta.tokenCache = token - ta.tokenExpiration = now.Add(time.Minute) + ts.tokenCache = token + ts.tokenExpiration = now.Add(time.Minute) } return nil @@ -210,26 +240,20 @@ type tokenResponse struct { Token string `json:"token"` } -func (ta *tokenAuthorizer) fetchToken(challenge authorizationChallenge) (token string, err error) { +func (ts *tokenHandler) fetchToken(params map[string]string) (token string, err error) { //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) - params := map[string]string{} - for k, v := range challenge.Parameters { - params[k] = v - } - params["scope"] = ta.scope.String() - realm, ok := params["realm"] if !ok { return "", errors.New("no realm specified for token auth challenge") } + // TODO(dmcgowan): Handle empty scheme + realmURL, err := url.Parse(realm) if err != nil { return "", fmt.Errorf("invalid token auth challenge realm: %s", err) } - // TODO(dmcgowan): Handle empty scheme - req, err := http.NewRequest("GET", realmURL.String(), nil) if err != nil { return "", err @@ -237,7 +261,7 @@ func (ta *tokenAuthorizer) fetchToken(challenge authorizationChallenge) (token s reqParams := req.URL.Query() service := params["service"] - scope := params["scope"] + scope := ts.scope.String() if service != "" { reqParams.Add("service", service) @@ -247,8 +271,8 @@ func (ta *tokenAuthorizer) fetchToken(challenge authorizationChallenge) (token s reqParams.Add("scope", scopeField) } - if ta.creds != nil { - username, password := ta.creds.Basic(realmURL) + if ts.creds != nil { + username, password := ts.creds.Basic(realmURL) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) @@ -257,7 +281,7 @@ func (ta *tokenAuthorizer) fetchToken(challenge authorizationChallenge) (token s req.URL.RawQuery = reqParams.Encode() - resp, err := ta.client().Do(req) + resp, err := ts.client().Do(req) if err != nil { return "", err } @@ -280,3 +304,30 @@ func (ta *tokenAuthorizer) fetchToken(challenge authorizationChallenge) (token s return tr.Token, nil } + +type basicHandler struct { + creds CredentialStore +} + +// NewBasicHandler creaters a new authentiation handler which adds +// basic authentication credentials to a request. 
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return errors.New("no basic auth credentials") +} From 17cbbf648fe31b942316140744d4f20a000f2d9a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 11 May 2015 16:39:12 -0700 Subject: [PATCH 114/501] Update ReadFrom to wrap reader in NopCloser Wrapping the reader in a NopCloser is necessary to prevent the http library from closing the input reader. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/layer_upload.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/client/layer_upload.go b/docs/client/layer_upload.go index 02cc5162..18e5fbab 100644 --- a/docs/client/layer_upload.go +++ b/docs/client/layer_upload.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net/http" "os" "time" @@ -33,7 +34,7 @@ func (hlu *httpLayerUpload) handleErrorResponse(resp *http.Response) error { } func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hlu.location, r) + req, err := http.NewRequest("PATCH", hlu.location, ioutil.NopCloser(r)) if err != nil { return 0, err } From a9b0f49c8bfe10e182d36549a1c0313afbbf99d7 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 11 May 2015 18:11:08 -0700 Subject: [PATCH 115/501] Removed unused mirror flags Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/repository.go | 2 -- docs/client/session.go | 7 ++++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index c79c306b..e7fcfa9f 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -40,7 +40,6 @@ func NewRepository(ctx context.Context, name, endpoint string, repoConfig *Repos ub: ub, name: name, context: ctx, - mirror: repoConfig.AllowMirrors, }, nil } @@ -49,7 +48,6 @@ type repository struct { ub *v2.URLBuilder context context.Context name string - mirror bool } func (r *repository) Name() string { diff --git a/docs/client/session.go b/docs/client/session.go index 97e932ff..dd8e7d80 100644 --- a/docs/client/session.go +++ b/docs/client/session.go @@ -34,9 +34,10 @@ type CredentialStore interface { // RepositoryConfig holds the base configuration needed to communicate // with a registry including a method of authorization and HTTP headers. 
type RepositoryConfig struct { - Header http.Header - AuthSource Authorizer - AllowMirrors bool + Header http.Header + AuthSource Authorizer + + //TODO(dmcgowan): Add tls config } // HTTPClient returns a new HTTP client configured for this configuration From 8b0ea19d392c5b5159f80657152c9bfc1b95586a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 12 May 2015 12:04:18 -0700 Subject: [PATCH 116/501] Add base transport to interface Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/session.go | 47 +++++++++++++++++++++++-------------- docs/client/session_test.go | 8 +++---- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/docs/client/session.go b/docs/client/session.go index dd8e7d80..e4e92383 100644 --- a/docs/client/session.go +++ b/docs/client/session.go @@ -37,16 +37,15 @@ type RepositoryConfig struct { Header http.Header AuthSource Authorizer - //TODO(dmcgowan): Add tls config + BaseTransport http.RoundTripper } // HTTPClient returns a new HTTP client configured for this configuration func (rc *RepositoryConfig) HTTPClient() (*http.Client, error) { - // TODO(dmcgowan): create base http.Transport with proper TLS configuration - transport := &Transport{ ExtraHeader: rc.Header, AuthSource: rc.AuthSource, + Base: rc.BaseTransport, } client := &http.Client{ @@ -62,25 +61,27 @@ func (rc *RepositoryConfig) HTTPClient() (*http.Client, error) { // requested. Basic authentication may either be done to the token source or // directly with the requested endpoint depending on the endpoint's // WWW-Authenticate header. -func NewTokenAuthorizer(creds CredentialStore, header http.Header, scope TokenScope) Authorizer { +func NewTokenAuthorizer(creds CredentialStore, transport http.RoundTripper, header http.Header, scope TokenScope) Authorizer { return &tokenAuthorizer{ header: header, challenges: map[string]map[string]authorizationChallenge{}, handlers: []AuthenticationHandler{ - NewTokenHandler(creds, scope, header), + NewTokenHandler(transport, creds, scope, header), NewBasicHandler(creds), }, + transport: transport, } } // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. 
-func NewAuthorizer(header http.Header, handlers ...AuthenticationHandler) Authorizer { +func NewAuthorizer(transport http.RoundTripper, header http.Header, handlers ...AuthenticationHandler) Authorizer { return &tokenAuthorizer{ header: header, challenges: map[string]map[string]authorizationChallenge{}, handlers: handlers, + transport: transport, } } @@ -88,11 +89,7 @@ type tokenAuthorizer struct { header http.Header challenges map[string]map[string]authorizationChallenge handlers []AuthenticationHandler -} - -func (ta *tokenAuthorizer) client() *http.Client { - // TODO(dmcgowan): Use same transport which has properly configured TLS - return &http.Client{Transport: &Transport{ExtraHeader: ta.header}} + transport http.RoundTripper } func (ta *tokenAuthorizer) ping(endpoint string) (map[string]authorizationChallenge, error) { @@ -101,7 +98,16 @@ func (ta *tokenAuthorizer) ping(endpoint string) (map[string]authorizationChalle return nil, err } - resp, err := ta.client().Do(req) + client := &http.Client{ + Transport: &Transport{ + ExtraHeader: ta.header, + Base: ta.transport, + }, + // Ping should fail fast + Timeout: 5 * time.Second, + } + + resp, err := client.Do(req) if err != nil { return nil, err } @@ -171,9 +177,10 @@ func (ta *tokenAuthorizer) Authorize(req *http.Request) error { } type tokenHandler struct { - header http.Header - creds CredentialStore - scope TokenScope + header http.Header + creds CredentialStore + scope TokenScope + transport http.RoundTripper tokenLock sync.Mutex tokenCache string @@ -190,7 +197,7 @@ type TokenScope struct { // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. -func NewTokenHandler(creds CredentialStore, scope TokenScope, header http.Header) AuthenticationHandler { +func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope TokenScope, header http.Header) AuthenticationHandler { return &tokenHandler{ header: header, creds: creds, @@ -203,8 +210,12 @@ func (ts TokenScope) String() string { } func (ts *tokenHandler) client() *http.Client { - // TODO(dmcgowan): Use same transport which has properly configured TLS - return &http.Client{Transport: &Transport{ExtraHeader: ts.header}} + return &http.Client{ + Transport: &Transport{ + ExtraHeader: ts.header, + Base: ts.transport, + }, + } } func (ts *tokenHandler) Scheme() string { diff --git a/docs/client/session_test.go b/docs/client/session_test.go index 87e1e66e..ee306cf6 100644 --- a/docs/client/session_test.go +++ b/docs/client/session_test.go @@ -117,7 +117,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { defer c() repo1Config := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(nil, nil, tokenScope1), + AuthSource: NewTokenAuthorizer(nil, nil, nil, tokenScope1), } client, err := repo1Config.HTTPClient() @@ -142,7 +142,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { defer c2() repo2Config := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(nil, nil, tokenScope2), + AuthSource: NewTokenAuthorizer(nil, nil, nil, tokenScope2), } client2, err := repo2Config.HTTPClient() if err != nil { @@ -221,7 +221,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { password: password, } repoConfig := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(creds, nil, tokenScope), + AuthSource: NewTokenAuthorizer(creds, nil, nil, tokenScope), } client, err := repoConfig.HTTPClient() @@ -266,7 +266,7 @@ func TestEndpointAuthorizeBasic(t *testing.T) { password: password, } repoConfig := &RepositoryConfig{ - AuthSource: 
NewTokenAuthorizer(creds, nil, TokenScope{}), + AuthSource: NewTokenAuthorizer(creds, nil, nil, TokenScope{}), } client, err := repoConfig.HTTPClient() From 89c396e0f5881ad1d1faaa939462b804c235266e Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 14 May 2015 09:54:23 -0700 Subject: [PATCH 117/501] Simplify configuration and transport Repository creation now just takes in an http.RoundTripper. Authenticated requests or requests which require additional headers should use the NewTransport function along with a request modifier (such an an authentication handler). Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/layer_upload_test.go | 7 +- docs/client/repository.go | 9 ++- docs/client/repository_test.go | 18 ++--- docs/client/session.go | 121 +++++++++---------------------- docs/client/session_test.go | 37 ++-------- docs/client/transport.go | 57 ++++++++++----- 6 files changed, 95 insertions(+), 154 deletions(-) diff --git a/docs/client/layer_upload_test.go b/docs/client/layer_upload_test.go index 9e22cb7c..3879c867 100644 --- a/docs/client/layer_upload_test.go +++ b/docs/client/layer_upload_test.go @@ -124,13 +124,8 @@ func TestUploadReadFrom(t *testing.T) { e, c := testServer(m) defer c() - repoConfig := &RepositoryConfig{} - client, err := repoConfig.HTTPClient() - if err != nil { - t.Fatalf("Error creating client: %s", err) - } layerUpload := &httpLayerUpload{ - client: client, + client: &http.Client{}, } // Valid case diff --git a/docs/client/repository.go b/docs/client/repository.go index e7fcfa9f..0bd89b11 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -20,7 +20,7 @@ import ( ) // NewRepository creates a new Repository for the given repository name and endpoint -func NewRepository(ctx context.Context, name, endpoint string, repoConfig *RepositoryConfig) (distribution.Repository, error) { +func NewRepository(ctx context.Context, name, endpoint string, transport http.RoundTripper) (distribution.Repository, error) { if err := v2.ValidateRespositoryName(name); err != nil { return nil, err } @@ -30,9 +30,10 @@ func NewRepository(ctx context.Context, name, endpoint string, repoConfig *Repos return nil, err } - client, err := repoConfig.HTTPClient() - if err != nil { - return nil, err + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + // TODO(dmcgowan): create cookie jar } return &repository{ diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index fe8ffeb7..650391c4 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -97,7 +97,7 @@ func TestLayerFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), "test.example.com/repo1", e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), "test.example.com/repo1", e, nil) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestLayerExists(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), "test.example.com/repo1", e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), "test.example.com/repo1", e, nil) if err != nil { t.Fatal(err) } @@ -227,7 +227,7 @@ func TestLayerUploadChunked(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } @@ -334,7 +334,7 @@ func TestLayerUploadMonolithic(t *testing.T) { e, c := testServer(m) defer 
c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } @@ -475,7 +475,7 @@ func TestManifestFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } @@ -508,7 +508,7 @@ func TestManifestFetchByTag(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } @@ -553,7 +553,7 @@ func TestManifestDelete(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } @@ -591,7 +591,7 @@ func TestManifestPut(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } @@ -636,7 +636,7 @@ func TestManifestTags(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, &RepositoryConfig{}) + r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } diff --git a/docs/client/session.go b/docs/client/session.go index e4e92383..41bb4f31 100644 --- a/docs/client/session.go +++ b/docs/client/session.go @@ -11,12 +11,6 @@ import ( "time" ) -// Authorizer is used to apply Authorization to an HTTP request -type Authorizer interface { - // Authorizer updates an HTTP request with the needed authorization - Authorize(req *http.Request) error -} - // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a single scheme. type AuthenticationHandler interface { @@ -31,54 +25,11 @@ type CredentialStore interface { Basic(*url.URL) (string, string) } -// RepositoryConfig holds the base configuration needed to communicate -// with a registry including a method of authorization and HTTP headers. -type RepositoryConfig struct { - Header http.Header - AuthSource Authorizer - - BaseTransport http.RoundTripper -} - -// HTTPClient returns a new HTTP client configured for this configuration -func (rc *RepositoryConfig) HTTPClient() (*http.Client, error) { - transport := &Transport{ - ExtraHeader: rc.Header, - AuthSource: rc.AuthSource, - Base: rc.BaseTransport, - } - - client := &http.Client{ - Transport: transport, - } - - return client, nil -} - -// NewTokenAuthorizer returns an authorizer which is capable of getting a token -// from a token server. The expected authorization method will be discovered -// by the authorizer, getting the token server endpoint from the URL being -// requested. Basic authentication may either be done to the token source or -// directly with the requested endpoint depending on the endpoint's -// WWW-Authenticate header. 
-func NewTokenAuthorizer(creds CredentialStore, transport http.RoundTripper, header http.Header, scope TokenScope) Authorizer { - return &tokenAuthorizer{ - header: header, - challenges: map[string]map[string]authorizationChallenge{}, - handlers: []AuthenticationHandler{ - NewTokenHandler(transport, creds, scope, header), - NewBasicHandler(creds), - }, - transport: transport, - } -} - // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. -func NewAuthorizer(transport http.RoundTripper, header http.Header, handlers ...AuthenticationHandler) Authorizer { +func NewAuthorizer(transport http.RoundTripper, handlers ...AuthenticationHandler) RequestModifier { return &tokenAuthorizer{ - header: header, challenges: map[string]map[string]authorizationChallenge{}, handlers: handlers, transport: transport, @@ -86,7 +37,6 @@ func NewAuthorizer(transport http.RoundTripper, header http.Header, handlers ... } type tokenAuthorizer struct { - header http.Header challenges map[string]map[string]authorizationChallenge handlers []AuthenticationHandler transport http.RoundTripper @@ -99,10 +49,7 @@ func (ta *tokenAuthorizer) ping(endpoint string) (map[string]authorizationChalle } client := &http.Client{ - Transport: &Transport{ - ExtraHeader: ta.header, - Base: ta.transport, - }, + Transport: ta.transport, // Ping should fail fast Timeout: 5 * time.Second, } @@ -140,7 +87,7 @@ HeaderLoop: return nil, nil } -func (ta *tokenAuthorizer) Authorize(req *http.Request) error { +func (ta *tokenAuthorizer) ModifyRequest(req *http.Request) error { v2Root := strings.Index(req.URL.Path, "/v2/") if v2Root == -1 { return nil @@ -195,54 +142,52 @@ type TokenScope struct { Actions []string } -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope TokenScope, header http.Header) AuthenticationHandler { - return &tokenHandler{ - header: header, - creds: creds, - scope: scope, - } -} - func (ts TokenScope) String() string { return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) } -func (ts *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: &Transport{ - ExtraHeader: ts.header, - Base: ts.transport, - }, +// NewTokenHandler creates a new AuthenicationHandler which supports +// fetching tokens from a remote token server. 
+func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope TokenScope) AuthenticationHandler { + return &tokenHandler{ + transport: transport, + creds: creds, + scope: scope, } } -func (ts *tokenHandler) Scheme() string { +func (th *tokenHandler) client() *http.Client { + return &http.Client{ + Transport: th.transport, + Timeout: 15 * time.Second, + } +} + +func (th *tokenHandler) Scheme() string { return "bearer" } -func (ts *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if err := ts.refreshToken(params); err != nil { +func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if err := th.refreshToken(params); err != nil { return err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", ts.tokenCache)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) return nil } -func (ts *tokenHandler) refreshToken(params map[string]string) error { - ts.tokenLock.Lock() - defer ts.tokenLock.Unlock() +func (th *tokenHandler) refreshToken(params map[string]string) error { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() now := time.Now() - if now.After(ts.tokenExpiration) { - token, err := ts.fetchToken(params) + if now.After(th.tokenExpiration) { + token, err := th.fetchToken(params) if err != nil { return err } - ts.tokenCache = token - ts.tokenExpiration = now.Add(time.Minute) + th.tokenCache = token + th.tokenExpiration = now.Add(time.Minute) } return nil @@ -252,7 +197,7 @@ type tokenResponse struct { Token string `json:"token"` } -func (ts *tokenHandler) fetchToken(params map[string]string) (token string, err error) { +func (th *tokenHandler) fetchToken(params map[string]string) (token string, err error) { //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) realm, ok := params["realm"] if !ok { @@ -273,7 +218,7 @@ func (ts *tokenHandler) fetchToken(params map[string]string) (token string, err reqParams := req.URL.Query() service := params["service"] - scope := ts.scope.String() + scope := th.scope.String() if service != "" { reqParams.Add("service", service) @@ -283,8 +228,8 @@ func (ts *tokenHandler) fetchToken(params map[string]string) (token string, err reqParams.Add("scope", scopeField) } - if ts.creds != nil { - username, password := ts.creds.Basic(realmURL) + if th.creds != nil { + username, password := th.creds.Basic(realmURL) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) @@ -293,7 +238,7 @@ func (ts *tokenHandler) fetchToken(params map[string]string) (token string, err req.URL.RawQuery = reqParams.Encode() - resp, err := ts.client().Do(req) + resp, err := th.client().Do(req) if err != nil { return "", err } diff --git a/docs/client/session_test.go b/docs/client/session_test.go index ee306cf6..cf8e546e 100644 --- a/docs/client/session_test.go +++ b/docs/client/session_test.go @@ -116,14 +116,8 @@ func TestEndpointAuthorizeToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - repo1Config := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(nil, nil, nil, tokenScope1), - } - - client, err := repo1Config.HTTPClient() - if err != nil { - t.Fatalf("Error creating http client: %s", err) - } + transport1 := NewTransport(nil, NewAuthorizer(nil, NewTokenHandler(nil, nil, tokenScope1))) + client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) @@ -141,13 +135,8 
@@ func TestEndpointAuthorizeToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, badCheck) defer c2() - repo2Config := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(nil, nil, nil, tokenScope2), - } - client2, err := repo2Config.HTTPClient() - if err != nil { - t.Fatalf("Error creating http client: %s", err) - } + transport2 := NewTransport(nil, NewAuthorizer(nil, NewTokenHandler(nil, nil, tokenScope2))) + client2 := &http.Client{Transport: transport2} req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) resp, err = client2.Do(req) @@ -220,14 +209,9 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { username: username, password: password, } - repoConfig := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(creds, nil, nil, tokenScope), - } - client, err := repoConfig.HTTPClient() - if err != nil { - t.Fatalf("Error creating http client: %s", err) - } + transport1 := NewTransport(nil, NewAuthorizer(nil, NewTokenHandler(nil, creds, tokenScope), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) @@ -265,14 +249,9 @@ func TestEndpointAuthorizeBasic(t *testing.T) { username: username, password: password, } - repoConfig := &RepositoryConfig{ - AuthSource: NewTokenAuthorizer(creds, nil, nil, TokenScope{}), - } - client, err := repoConfig.HTTPClient() - if err != nil { - t.Fatalf("Error creating http client: %s", err) - } + transport1 := NewTransport(nil, NewAuthorizer(nil, NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) diff --git a/docs/client/transport.go b/docs/client/transport.go index e92ba543..0b241619 100644 --- a/docs/client/transport.go +++ b/docs/client/transport.go @@ -6,14 +6,36 @@ import ( "sync" ) -// Transport is an http.RoundTripper that makes registry HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// from an Auth source -type Transport struct { - AuthSource Authorizer - ExtraHeader http.Header +type RequestModifier interface { + ModifyRequest(*http.Request) error +} - Base http.RoundTripper +type headerModifier http.Header + +func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper mu sync.Mutex // guards modReq modReq map[*http.Request]*http.Request // original -> modified @@ -22,13 +44,14 @@ type Transport struct { // RoundTrip authorizes and authenticates the request with an // access token. If no token exists or token is expired, // tries to refresh/fetch a new token. 
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := t.cloneRequest(req) - if t.AuthSource != nil { - if err := t.AuthSource.Authorize(req2); err != nil { +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { return nil, err } } + t.setModReq(req, req2) res, err := t.base().RoundTrip(req2) if err != nil { @@ -43,7 +66,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { } // CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { +func (t *transport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } @@ -56,14 +79,14 @@ func (t *Transport) CancelRequest(req *http.Request) { } } -func (t *Transport) base() http.RoundTripper { +func (t *transport) base() http.RoundTripper { if t.Base != nil { return t.Base } return http.DefaultTransport } -func (t *Transport) setModReq(orig, mod *http.Request) { +func (t *transport) setModReq(orig, mod *http.Request) { t.mu.Lock() defer t.mu.Unlock() if t.modReq == nil { @@ -78,7 +101,7 @@ func (t *Transport) setModReq(orig, mod *http.Request) { // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. -func (t *Transport) cloneRequest(r *http.Request) *http.Request { +func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r @@ -87,9 +110,7 @@ func (t *Transport) cloneRequest(r *http.Request) *http.Request { for k, s := range r.Header { r2.Header[k] = append([]string(nil), s...) } - for k, s := range t.ExtraHeader { - r2.Header[k] = append(r2.Header[k], s...) 
- } + return r2 } From 6bf4c45e52078dc51b77ab477d9c0798f470107a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 14 May 2015 10:18:21 -0700 Subject: [PATCH 118/501] Add missing defer on Tags Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/repository.go | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/client/repository.go b/docs/client/repository.go index 0bd89b11..4055577d 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -99,6 +99,7 @@ func (ms *manifests) Tags() ([]string, error) { if err != nil { return nil, err } + defer resp.Body.Close() switch { case resp.StatusCode == http.StatusOK: From 67e2e83434225becedf530dde7064ab5ac18ee14 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 13:29:44 -0700 Subject: [PATCH 119/501] Update to use blob interfaces Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/layer.go | 113 +++++++++++++------------------ docs/client/layer_upload.go | 72 ++++++++++---------- docs/client/layer_upload_test.go | 30 ++++---- docs/client/repository.go | 103 ++++++++++++++++------------ docs/client/repository_test.go | 85 ++++++++++++----------- 5 files changed, 204 insertions(+), 199 deletions(-) diff --git a/docs/client/layer.go b/docs/client/layer.go index b6e1697d..e7c0039c 100644 --- a/docs/client/layer.go +++ b/docs/client/layer.go @@ -8,18 +8,15 @@ import ( "io/ioutil" "net/http" "os" - "time" + "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" ) -type httpLayer struct { - *layers +type httpBlob struct { + *repository - size int64 - digest digest.Digest - createdAt time.Time + desc distribution.Descriptor rc io.ReadCloser // remote read closer brd *bufio.Reader // internal buffered io @@ -27,48 +24,40 @@ type httpLayer struct { err error } -func (hl *httpLayer) CreatedAt() time.Time { - return hl.createdAt -} - -func (hl *httpLayer) Digest() digest.Digest { - return hl.digest -} - -func (hl *httpLayer) Read(p []byte) (n int, err error) { - if hl.err != nil { - return 0, hl.err +func (hb *httpBlob) Read(p []byte) (n int, err error) { + if hb.err != nil { + return 0, hb.err } - rd, err := hl.reader() + rd, err := hb.reader() if err != nil { return 0, err } n, err = rd.Read(p) - hl.offset += int64(n) + hb.offset += int64(n) // Simulate io.EOF error if we reach filesize. - if err == nil && hl.offset >= hl.size { + if err == nil && hb.offset >= hb.desc.Length { err = io.EOF } return n, err } -func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { - if hl.err != nil { - return 0, hl.err +func (hb *httpBlob) Seek(offset int64, whence int) (int64, error) { + if hb.err != nil { + return 0, hb.err } var err error - newOffset := hl.offset + newOffset := hb.offset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: - newOffset = hl.size + int64(offset) + newOffset = hb.desc.Length + int64(offset) case os.SEEK_SET: newOffset = int64(offset) } @@ -76,60 +65,60 @@ func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { if newOffset < 0 { err = fmt.Errorf("cannot seek to negative position") } else { - if hl.offset != newOffset { - hl.reset() + if hb.offset != newOffset { + hb.reset() } // No problems, set the offset. 
- hl.offset = newOffset + hb.offset = newOffset } - return hl.offset, err + return hb.offset, err } -func (hl *httpLayer) Close() error { - if hl.err != nil { - return hl.err +func (hb *httpBlob) Close() error { + if hb.err != nil { + return hb.err } // close and release reader chain - if hl.rc != nil { - hl.rc.Close() + if hb.rc != nil { + hb.rc.Close() } - hl.rc = nil - hl.brd = nil + hb.rc = nil + hb.brd = nil - hl.err = fmt.Errorf("httpLayer: closed") + hb.err = fmt.Errorf("httpBlob: closed") return nil } -func (hl *httpLayer) reset() { - if hl.err != nil { +func (hb *httpBlob) reset() { + if hb.err != nil { return } - if hl.rc != nil { - hl.rc.Close() - hl.rc = nil + if hb.rc != nil { + hb.rc.Close() + hb.rc = nil } } -func (hl *httpLayer) reader() (io.Reader, error) { - if hl.err != nil { - return nil, hl.err +func (hb *httpBlob) reader() (io.Reader, error) { + if hb.err != nil { + return nil, hb.err } - if hl.rc != nil { - return hl.brd, nil + if hb.rc != nil { + return hb.brd, nil } // If the offset is great than or equal to size, return a empty, noop reader. - if hl.offset >= hl.size { + if hb.offset >= hb.desc.Length { return ioutil.NopCloser(bytes.NewReader([]byte{})), nil } - blobURL, err := hl.ub.BuildBlobURL(hl.name, hl.digest) + blobURL, err := hb.ub.BuildBlobURL(hb.name, hb.desc.Digest) if err != nil { return nil, err } @@ -139,40 +128,32 @@ func (hl *httpLayer) reader() (io.Reader, error) { return nil, err } - if hl.offset > 0 { + if hb.offset > 0 { // TODO(stevvooe): Get this working correctly. // If we are at different offset, issue a range request from there. req.Header.Add("Range", fmt.Sprintf("1-")) - context.GetLogger(hl.context).Infof("Range: %s", req.Header.Get("Range")) + context.GetLogger(hb.context).Infof("Range: %s", req.Header.Get("Range")) } - resp, err := hl.client.Do(req) + resp, err := hb.client.Do(req) if err != nil { return nil, err } switch { case resp.StatusCode == 200: - hl.rc = resp.Body + hb.rc = resp.Body default: defer resp.Body.Close() return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } - if hl.brd == nil { - hl.brd = bufio.NewReader(hl.rc) + if hb.brd == nil { + hb.brd = bufio.NewReader(hb.rc) } else { - hl.brd.Reset(hl.rc) + hb.brd.Reset(hb.rc) } - return hl.brd, nil -} - -func (hl *httpLayer) Length() int64 { - return hl.size -} - -func (hl *httpLayer) Handler(r *http.Request) (http.Handler, error) { - panic("Not implemented") + return hb.brd, nil } diff --git a/docs/client/layer_upload.go b/docs/client/layer_upload.go index 18e5fbab..3697ef8c 100644 --- a/docs/client/layer_upload.go +++ b/docs/client/layer_upload.go @@ -11,10 +11,10 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/context" ) -type httpLayerUpload struct { +type httpBlobUpload struct { repo distribution.Repository client *http.Client @@ -26,32 +26,32 @@ type httpLayerUpload struct { closed bool } -func (hlu *httpLayerUpload) handleErrorResponse(resp *http.Response) error { +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { - return &BlobUploadNotFoundError{Location: hlu.location} + return &BlobUploadNotFoundError{Location: hbu.location} } return handleErrorResponse(resp) } -func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hlu.location, ioutil.NopCloser(r)) +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := 
http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) if err != nil { return 0, err } defer req.Body.Close() - resp, err := hlu.client.Do(req) + resp, err := hbu.client.Do(req) if err != nil { return 0, err } if resp.StatusCode != http.StatusAccepted { - return 0, hlu.handleErrorResponse(resp) + return 0, hbu.handleErrorResponse(resp) } // TODO(dmcgowan): Validate headers - hlu.uuid = resp.Header.Get("Docker-Upload-UUID") - hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { return 0, err } @@ -67,27 +67,27 @@ func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { } -func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hlu.location, bytes.NewReader(p)) +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) if err != nil { return 0, err } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hlu.offset, hlu.offset+int64(len(p)-1))) + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) req.Header.Set("Content-Type", "application/octet-stream") - resp, err := hlu.client.Do(req) + resp, err := hbu.client.Do(req) if err != nil { return 0, err } if resp.StatusCode != http.StatusAccepted { - return 0, hlu.handleErrorResponse(resp) + return 0, hbu.handleErrorResponse(resp) } // TODO(dmcgowan): Validate headers - hlu.uuid = resp.Header.Get("Docker-Upload-UUID") - hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { return 0, err } @@ -103,8 +103,8 @@ func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { } -func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { - newOffset := hlu.offset +func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { + newOffset := hbu.offset switch whence { case os.SEEK_CUR: @@ -115,47 +115,47 @@ func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { newOffset = int64(offset) } - hlu.offset = newOffset + hbu.offset = newOffset - return hlu.offset, nil + return hbu.offset, nil } -func (hlu *httpLayerUpload) UUID() string { - return hlu.uuid +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid } -func (hlu *httpLayerUpload) StartedAt() time.Time { - return hlu.startedAt +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt } -func (hlu *httpLayerUpload) Finish(digest digest.Digest) (distribution.Layer, error) { +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hlu.location, nil) + req, err := http.NewRequest("PUT", hbu.location, nil) if err != nil { - return nil, err + return distribution.Descriptor{}, err } values := req.URL.Query() - values.Set("digest", digest.String()) + values.Set("digest", desc.Digest.String()) req.URL.RawQuery = values.Encode() - resp, err := hlu.client.Do(req) + resp, err := hbu.client.Do(req) if err != nil { - return nil, err + return distribution.Descriptor{}, err } if 
resp.StatusCode != http.StatusCreated { - return nil, hlu.handleErrorResponse(resp) + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) } - return hlu.repo.Layers().Fetch(digest) + return hbu.repo.Blobs(ctx).Stat(ctx, desc.Digest) } -func (hlu *httpLayerUpload) Cancel() error { +func (hbu *httpBlobUpload) Rollback(ctx context.Context) error { panic("not implemented") } -func (hlu *httpLayerUpload) Close() error { - hlu.closed = true +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true return nil } diff --git a/docs/client/layer_upload_test.go b/docs/client/layer_upload_test.go index 3879c867..2e4edc45 100644 --- a/docs/client/layer_upload_test.go +++ b/docs/client/layer_upload_test.go @@ -11,8 +11,8 @@ import ( "github.com/docker/distribution/testutil" ) -// Test implements distribution.LayerUpload -var _ distribution.LayerUpload = &httpLayerUpload{} +// Test implements distribution.BlobWriter +var _ distribution.BlobWriter = &httpBlobUpload{} func TestUploadReadFrom(t *testing.T) { _, b := newRandomBlob(64) @@ -124,13 +124,13 @@ func TestUploadReadFrom(t *testing.T) { e, c := testServer(m) defer c() - layerUpload := &httpLayerUpload{ + blobUpload := &httpBlobUpload{ client: &http.Client{}, } // Valid case - layerUpload.location = e + locationPath - n, err := layerUpload.ReadFrom(bytes.NewReader(b)) + blobUpload.location = e + locationPath + n, err := blobUpload.ReadFrom(bytes.NewReader(b)) if err != nil { t.Fatalf("Error calling ReadFrom: %s", err) } @@ -139,15 +139,15 @@ func TestUploadReadFrom(t *testing.T) { } // Bad range - layerUpload.location = e + locationPath - _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when bad range received") } // 404 - layerUpload.location = e + locationPath - _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } @@ -158,8 +158,8 @@ func TestUploadReadFrom(t *testing.T) { } // 400 valid json - layerUpload.location = e + locationPath - _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } @@ -181,8 +181,8 @@ func TestUploadReadFrom(t *testing.T) { } // 400 invalid json - layerUpload.location = e + locationPath - _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } @@ -196,8 +196,8 @@ func TestUploadReadFrom(t *testing.T) { } // 500 - layerUpload.location = e + locationPath - _, err = layerUpload.ReadFrom(bytes.NewReader(b)) + blobUpload.location = e + locationPath + _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } diff --git a/docs/client/repository.go b/docs/client/repository.go index 4055577d..940ae1df 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/url" @@ -55,8 +56,8 @@ func (r *repository) Name() string { return r.name } -func (r *repository) Layers() distribution.LayerService { - return &layers{ +func (r *repository) Blobs(ctx context.Context) distribution.BlobService { + return &blobs{ 
repository: r, } } @@ -229,7 +230,7 @@ func (ms *manifests) Delete(dgst digest.Digest) error { } } -type layers struct { +type blobs struct { *repository } @@ -254,25 +255,55 @@ func sanitizeLocation(location, source string) (string, error) { return location, nil } -func (ls *layers) Exists(dgst digest.Digest) (bool, error) { - _, err := ls.fetchLayer(dgst) +func (ls *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + desc, err := ls.Stat(ctx, dgst) if err != nil { - switch err := err.(type) { - case distribution.ErrUnknownLayer: - return false, nil - default: - return false, err - } + return nil, err + } + reader, err := ls.Open(ctx, desc) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (ls *blobs) Open(ctx context.Context, desc distribution.Descriptor) (distribution.ReadSeekCloser, error) { + return &httpBlob{ + repository: ls.repository, + desc: desc, + }, nil +} + +func (ls *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, desc distribution.Descriptor) error { + return nil +} + +func (ls *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := ls.Writer(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.NewCanonicalDigester() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr)) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) } - return true, nil + desc := distribution.Descriptor{ + MediaType: mediaType, + Length: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) } -func (ls *layers) Fetch(dgst digest.Digest) (distribution.Layer, error) { - return ls.fetchLayer(dgst) -} - -func (ls *layers) Upload() (distribution.LayerUpload, error) { +func (ls *blobs) Writer(ctx context.Context) (distribution.BlobWriter, error) { u, err := ls.ub.BuildBlobUploadURL(ls.name) resp, err := ls.client.Post(u, "", nil) @@ -290,7 +321,7 @@ func (ls *layers) Upload() (distribution.LayerUpload, error) { return nil, err } - return &httpLayerUpload{ + return &httpBlobUpload{ repo: ls.repository, client: ls.client, uuid: uuid, @@ -302,19 +333,19 @@ func (ls *layers) Upload() (distribution.LayerUpload, error) { } } -func (ls *layers) Resume(uuid string) (distribution.LayerUpload, error) { +func (ls *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } -func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) { +func (ls *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { u, err := ls.ub.BuildBlobURL(ls.name, dgst) if err != nil { - return nil, err + return distribution.Descriptor{}, err } resp, err := ls.client.Head(u) if err != nil { - return nil, err + return distribution.Descriptor{}, err } defer resp.Body.Close() @@ -323,31 +354,17 @@ func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) { lengthHeader := resp.Header.Get("Content-Length") length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { - return nil, fmt.Errorf("error parsing content-length: %v", err) + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) } - var t time.Time - lastModified := resp.Header.Get("Last-Modified") - if lastModified != "" { - t, err = http.ParseTime(lastModified) - if err != nil { - 
return nil, fmt.Errorf("error parsing last-modified: %v", err) - } - } - - return &httpLayer{ - layers: ls, - size: length, - digest: dgst, - createdAt: t, + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Length: length, + Digest: dgst, }, nil case resp.StatusCode == http.StatusNotFound: - return nil, distribution.ErrUnknownLayer{ - FSLayer: manifest.FSLayer{ - BlobSum: dgst, - }, - } + return distribution.Descriptor{}, distribution.ErrBlobUnknown default: - return nil, handleErrorResponse(resp) + return distribution.Descriptor{}, handleErrorResponse(resp) } } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 650391c4..514f3ee2 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -5,7 +5,6 @@ import ( "crypto/rand" "encoding/json" "fmt" - "io/ioutil" "log" "net/http" "net/http/httptest" @@ -15,6 +14,7 @@ import ( "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" @@ -88,7 +88,7 @@ func addPing(m *testutil.RequestResponseMap) { }) } -func TestLayerFetch(t *testing.T) { +func TestBlobFetch(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addTestFetch("test.example.com/repo1", d1, b1, &m) @@ -97,17 +97,14 @@ func TestLayerFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), "test.example.com/repo1", e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) if err != nil { t.Fatal(err) } - l := r.Layers() + l := r.Blobs(ctx) - layer, err := l.Fetch(d1) - if err != nil { - t.Fatal(err) - } - b, err := ioutil.ReadAll(layer) + b, err := l.Get(ctx, d1) if err != nil { t.Fatal(err) } @@ -118,7 +115,7 @@ func TestLayerFetch(t *testing.T) { // TODO(dmcgowan): Test error cases } -func TestLayerExists(t *testing.T) { +func TestBlobExists(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addTestFetch("test.example.com/repo1", d1, b1, &m) @@ -127,24 +124,30 @@ func TestLayerExists(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), "test.example.com/repo1", e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) if err != nil { t.Fatal(err) } - l := r.Layers() + l := r.Blobs(ctx) - ok, err := l.Exists(d1) + stat, err := l.Stat(ctx, d1) if err != nil { t.Fatal(err) } - if !ok { - t.Fatalf("Blob does not exist: %s", d1) + + if stat.Digest != d1 { + t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) } - // TODO(dmcgowan): Test error cases + if stat.Length != int64(len(b1)) { + t.Fatalf("Unexpected length: %d, expected %d", stat.Length, len(b1)) + } + + // TODO(dmcgowan): Test error cases and ErrBlobUnknown case } -func TestLayerUploadChunked(t *testing.T) { +func TestBlobUploadChunked(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addPing(&m) @@ -227,19 +230,20 @@ func TestLayerUploadChunked(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } - l := r.Layers() + l := r.Blobs(ctx) - upload, err := l.Upload() + upload, err := l.Writer(ctx) if err != nil { t.Fatal(err) } - if upload.UUID() != uuids[0] { - log.Fatalf("Unexpected 
UUID %s; expected %s", upload.UUID(), uuids[0]) + if upload.ID() != uuids[0] { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) } for _, chunk := range chunks { @@ -252,17 +256,20 @@ func TestLayerUploadChunked(t *testing.T) { } } - layer, err := upload.Finish(dgst) + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Length: int64(len(b1)), + }) if err != nil { t.Fatal(err) } - if layer.Length() != int64(len(b1)) { - t.Fatalf("Unexpected layer size: %d; expected: %d", layer.Length(), len(b1)) + if blob.Length != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) } } -func TestLayerUploadMonolithic(t *testing.T) { +func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addPing(&m) @@ -334,19 +341,20 @@ func TestLayerUploadMonolithic(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } - l := r.Layers() + l := r.Blobs(ctx) - upload, err := l.Upload() + upload, err := l.Writer(ctx) if err != nil { t.Fatal(err) } - if upload.UUID() != uploadID { - log.Fatalf("Unexpected UUID %s; expected %s", upload.UUID(), uploadID) + if upload.ID() != uploadID { + log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) } n, err := upload.ReadFrom(bytes.NewReader(b1)) @@ -357,20 +365,19 @@ func TestLayerUploadMonolithic(t *testing.T) { t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) } - layer, err := upload.Finish(dgst) + blob, err := upload.Commit(ctx, distribution.Descriptor{ + Digest: dgst, + Length: int64(len(b1)), + }) if err != nil { t.Fatal(err) } - if layer.Length() != int64(len(b1)) { - t.Fatalf("Unexpected layer size: %d; expected: %d", layer.Length(), len(b1)) + if blob.Length != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) } } -func TestLayerUploadResume(t *testing.T) { - // TODO(dmcgowan): implement -} - func newRandomSchema1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { blobs := make([]manifest.FSLayer, blobCount) history := make([]manifest.History, blobCount) @@ -447,7 +454,7 @@ func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) } if len(m1.FSLayers) != len(m2.FSLayers) { - return fmt.Errorf("fs layer length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) + return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) } for i := range m1.FSLayers { if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { From 60b314ade54d2326e921c1d21c7ab095e63d40b7 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 13:31:28 -0700 Subject: [PATCH 120/501] Rename layer files to blob Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/{layer.go => blob.go} | 0 docs/client/{layer_upload.go => blob_writer.go} | 0 docs/client/{layer_upload_test.go => blob_writer_test.go} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename docs/client/{layer.go => blob.go} (100%) rename docs/client/{layer_upload.go => blob_writer.go} (100%) rename docs/client/{layer_upload_test.go => blob_writer_test.go} (100%) diff --git a/docs/client/layer.go b/docs/client/blob.go similarity index 100% rename from docs/client/layer.go rename to docs/client/blob.go diff 
--git a/docs/client/layer_upload.go b/docs/client/blob_writer.go similarity index 100% rename from docs/client/layer_upload.go rename to docs/client/blob_writer.go diff --git a/docs/client/layer_upload_test.go b/docs/client/blob_writer_test.go similarity index 100% rename from docs/client/layer_upload_test.go rename to docs/client/blob_writer_test.go From 568df315fff0e8514cd262d75a62f6c00228ffe9 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 15:54:04 -0700 Subject: [PATCH 121/501] Open cache interface Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/storage/blobcachemetrics.go | 60 +++++++++++++ .../cache/cachedblobdescriptorstore.go | 80 ++++++++++++++++++ docs/storage/cachedblobdescriptorstore.go | 84 ------------------- docs/storage/registry.go | 10 +-- 4 files changed, 142 insertions(+), 92 deletions(-) create mode 100644 docs/storage/blobcachemetrics.go create mode 100644 docs/storage/cache/cachedblobdescriptorstore.go delete mode 100644 docs/storage/cachedblobdescriptorstore.go diff --git a/docs/storage/blobcachemetrics.go b/docs/storage/blobcachemetrics.go new file mode 100644 index 00000000..fad0a77a --- /dev/null +++ b/docs/storage/blobcachemetrics.go @@ -0,0 +1,60 @@ +package storage + +import ( + "expvar" + "sync/atomic" + + "github.com/docker/distribution/registry/storage/cache" +) + +type blobStatCollector struct { + metrics cache.Metrics +} + +func (bsc *blobStatCollector) Hit() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Hits, 1) +} + +func (bsc *blobStatCollector) Miss() { + atomic.AddUint64(&bsc.metrics.Requests, 1) + atomic.AddUint64(&bsc.metrics.Misses, 1) +} + +func (bsc *blobStatCollector) Metrics() cache.Metrics { + return bsc.metrics +} + +// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor +// cache requests. Note this is kept globally and made available via expvar. +// For more detailed metrics, its recommend to instrument a particular cache +// implementation. +var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + cache := registry.(*expvar.Map).Get("cache") + if cache == nil { + cache = &expvar.Map{} + cache.(*expvar.Map).Init() + registry.(*expvar.Map).Set("cache", cache) + } + + storage := cache.(*expvar.Map).Get("storage") + if storage == nil { + storage = &expvar.Map{} + storage.(*expvar.Map).Init() + cache.(*expvar.Map).Set("storage", storage) + } + + storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { + // no need for synchronous access: the increments are atomic and + // during reading, we don't care if the data is up to date. The + // numbers will always *eventually* be reported correctly. + return blobStatterCacheMetrics + })) +} diff --git a/docs/storage/cache/cachedblobdescriptorstore.go b/docs/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 00000000..a095b19a --- /dev/null +++ b/docs/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,80 @@ +package cache + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. 
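// The Metrics counters above reduce naturally to a hit rate for reporting.
// A minimal sketch, not part of this patch; hitRate is an illustrative
// helper that assumes only the Metrics struct defined above:
func hitRate(m Metrics) float64 {
	if m.Requests == 0 {
		return 0
	}
	return float64(m.Hits) / float64(m.Requests)
}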
+type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobStatter + tracker MetricsTracker +} + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. +func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobStatter) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will send to the tracker. +func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobStatter, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err +} diff --git a/docs/storage/cachedblobdescriptorstore.go b/docs/storage/cachedblobdescriptorstore.go deleted file mode 100644 index a0ccd067..00000000 --- a/docs/storage/cachedblobdescriptorstore.go +++ /dev/null @@ -1,84 +0,0 @@ -package storage - -import ( - "expvar" - "sync/atomic" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - - "github.com/docker/distribution" -) - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobStatter -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - atomic.AddUint64(&blobStatterCacheMetrics.Stat.Requests, 1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - - atomic.AddUint64(&blobStatterCacheMetrics.Stat.Hits, 1) - return desc, nil -fallback: - atomic.AddUint64(&blobStatterCacheMetrics.Stat.Misses, 1) - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err -} - -// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor -// cache requests. Note this is kept globally and made available via expvar. -// For more detailed metrics, its recommend to instrument a particular cache -// implementation. -var blobStatterCacheMetrics struct { - // Stat tracks calls to the caches. 
- Stat struct { - Requests uint64 - Hits uint64 - Misses uint64 - } -} - -func init() { - registry := expvar.Get("registry") - if registry == nil { - registry = expvar.NewMap("registry") - } - - cache := registry.(*expvar.Map).Get("cache") - if cache == nil { - cache = &expvar.Map{} - cache.(*expvar.Map).Init() - registry.(*expvar.Map).Set("cache", cache) - } - - storage := cache.(*expvar.Map).Get("storage") - if storage == nil { - storage = &expvar.Map{} - storage.(*expvar.Map).Init() - cache.(*expvar.Map).Set("storage", storage) - } - - storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { - // no need for synchronous access: the increments are atomic and - // during reading, we don't care if the data is up to date. The - // numbers will always *eventually* be reported correctly. - return blobStatterCacheMetrics - })) -} diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 659c789e..cc223727 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -29,10 +29,7 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv } if blobDescriptorCacheProvider != nil { - statter = &cachedBlobStatter{ - cache: blobDescriptorCacheProvider, - backend: statter, - } + statter = cache.NewCachedBlobStatter(blobDescriptorCacheProvider, statter) } bs := &blobStore{ @@ -143,10 +140,7 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { } if repo.descriptorCache != nil { - statter = &cachedBlobStatter{ - cache: repo.descriptorCache, - backend: statter, - } + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) } return &linkedBlobStore{ From 296a8415b9e214e4c99ade87471aea74b65cfb96 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 16:25:00 -0700 Subject: [PATCH 122/501] Update to track refactor updates Added use of cache blob statter Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/blob.go | 159 -------------------------------- docs/client/blob_writer.go | 2 +- docs/client/http_reader.go | 164 +++++++++++++++++++++++++++++++++ docs/client/repository.go | 45 ++++++--- docs/client/repository_test.go | 4 +- 5 files changed, 201 insertions(+), 173 deletions(-) delete mode 100644 docs/client/blob.go create mode 100644 docs/client/http_reader.go diff --git a/docs/client/blob.go b/docs/client/blob.go deleted file mode 100644 index e7c0039c..00000000 --- a/docs/client/blob.go +++ /dev/null @@ -1,159 +0,0 @@ -package client - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -type httpBlob struct { - *repository - - desc distribution.Descriptor - - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 - err error -} - -func (hb *httpBlob) Read(p []byte) (n int, err error) { - if hb.err != nil { - return 0, hb.err - } - - rd, err := hb.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hb.offset += int64(n) - - // Simulate io.EOF error if we reach filesize. 
- if err == nil && hb.offset >= hb.desc.Length { - err = io.EOF - } - - return n, err -} - -func (hb *httpBlob) Seek(offset int64, whence int) (int64, error) { - if hb.err != nil { - return 0, hb.err - } - - var err error - newOffset := hb.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = hb.desc.Length + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - if hb.offset != newOffset { - hb.reset() - } - - // No problems, set the offset. - hb.offset = newOffset - } - - return hb.offset, err -} - -func (hb *httpBlob) Close() error { - if hb.err != nil { - return hb.err - } - - // close and release reader chain - if hb.rc != nil { - hb.rc.Close() - } - - hb.rc = nil - hb.brd = nil - - hb.err = fmt.Errorf("httpBlob: closed") - - return nil -} - -func (hb *httpBlob) reset() { - if hb.err != nil { - return - } - if hb.rc != nil { - hb.rc.Close() - hb.rc = nil - } -} - -func (hb *httpBlob) reader() (io.Reader, error) { - if hb.err != nil { - return nil, hb.err - } - - if hb.rc != nil { - return hb.brd, nil - } - - // If the offset is great than or equal to size, return a empty, noop reader. - if hb.offset >= hb.desc.Length { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - - blobURL, err := hb.ub.BuildBlobURL(hb.name, hb.desc.Digest) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", blobURL, nil) - if err != nil { - return nil, err - } - - if hb.offset > 0 { - // TODO(stevvooe): Get this working correctly. - - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("1-")) - context.GetLogger(hb.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hb.client.Do(req) - if err != nil { - return nil, err - } - - switch { - case resp.StatusCode == 200: - hb.rc = resp.Body - default: - defer resp.Body.Close() - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - if hb.brd == nil { - hb.brd = bufio.NewReader(hb.rc) - } else { - hb.brd.Reset(hb.rc) - } - - return hb.brd, nil -} diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 3697ef8c..44151167 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -151,7 +151,7 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip return hbu.repo.Blobs(ctx).Stat(ctx, desc.Digest) } -func (hbu *httpBlobUpload) Rollback(ctx context.Context) error { +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { panic("not implemented") } diff --git a/docs/client/http_reader.go b/docs/client/http_reader.go new file mode 100644 index 00000000..22f9bfbc --- /dev/null +++ b/docs/client/http_reader.go @@ -0,0 +1,164 @@ +package client + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + + "github.com/docker/distribution" +) + +func NewHTTPReadSeeker(client *http.Client, url string, size int64) distribution.ReadSeekCloser { + return &httpReadSeeker{ + client: client, + url: url, + size: size, + } +} + +type httpReadSeeker struct { + client *http.Client + url string + + size int64 + + rc io.ReadCloser // remote read closer + brd *bufio.Reader // internal buffered io + offset int64 + err error +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.err != nil { + return 0, hrs.err + } + + rd, err := hrs.reader() + if err != nil { + 
return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + + // Simulate io.EOF error if we reach filesize. + if err == nil && hrs.offset >= hrs.size { + err = io.EOF + } + + return n, err +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.err != nil { + return 0, hrs.err + } + + var err error + newOffset := hrs.offset + + switch whence { + case os.SEEK_CUR: + newOffset += int64(offset) + case os.SEEK_END: + newOffset = hrs.size + int64(offset) + case os.SEEK_SET: + newOffset = int64(offset) + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + if hrs.offset != newOffset { + hrs.reset() + } + + // No problems, set the offset. + hrs.offset = newOffset + } + + return hrs.offset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + hrs.brd = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.brd, nil + } + + // If the offset is great than or equal to size, return a empty, noop reader. + if hrs.offset >= hrs.size { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.offset > 0 { + // TODO(stevvooe): Get this working correctly. + + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", "1-") + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + switch { + case resp.StatusCode == 200: + hrs.rc = resp.Body + default: + defer resp.Body.Close() + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + if hrs.brd == nil { + hrs.brd = bufio.NewReader(hrs.rc) + } else { + hrs.brd.Reset(hrs.rc) + } + + return hrs.brd, nil +} diff --git a/docs/client/repository.go b/docs/client/repository.go index 940ae1df..61dcf0f4 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" ) // NewRepository creates a new Repository for the given repository name and endpoint @@ -56,9 +57,13 @@ func (r *repository) Name() string { return r.name } -func (r *repository) Blobs(ctx context.Context) distribution.BlobService { +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + repository: r, + } return &blobs{ repository: r, + statter: cache.NewCachedBlobStatter(cache.NewInMemoryBlobDescriptorCacheProvider(), statter), } } @@ -232,6 +237,8 @@ func (ms *manifests) Delete(dgst digest.Digest) error { type blobs struct { *repository + + statter distribution.BlobStatter } func sanitizeLocation(location, source string) (string, error) { @@ -255,12 +262,17 @@ func sanitizeLocation(location, source string) (string, error) { return location, nil } +func (ls *blobs) Stat(ctx context.Context, dgst digest.Digest) 
(distribution.Descriptor, error) { + return ls.statter.Stat(ctx, dgst) + +} + func (ls *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { desc, err := ls.Stat(ctx, dgst) if err != nil { return nil, err } - reader, err := ls.Open(ctx, desc) + reader, err := ls.Open(ctx, desc.Digest) if err != nil { return nil, err } @@ -269,19 +281,26 @@ func (ls *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { return ioutil.ReadAll(reader) } -func (ls *blobs) Open(ctx context.Context, desc distribution.Descriptor) (distribution.ReadSeekCloser, error) { - return &httpBlob{ - repository: ls.repository, - desc: desc, - }, nil +func (ls *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + stat, err := ls.statter.Stat(ctx, dgst) + if err != nil { + return nil, err + } + + blobURL, err := ls.ub.BuildBlobURL(ls.Name(), stat.Digest) + if err != nil { + return nil, err + } + + return NewHTTPReadSeeker(ls.repository.client, blobURL, stat.Length), nil } -func (ls *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, desc distribution.Descriptor) error { +func (ls *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { return nil } func (ls *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := ls.Writer(ctx) + writer, err := ls.Create(ctx) if err != nil { return distribution.Descriptor{}, err } @@ -303,7 +322,7 @@ func (ls *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } -func (ls *blobs) Writer(ctx context.Context) (distribution.BlobWriter, error) { +func (ls *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { u, err := ls.ub.BuildBlobUploadURL(ls.name) resp, err := ls.client.Post(u, "", nil) @@ -337,7 +356,11 @@ func (ls *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter panic("not implemented") } -func (ls *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +type blobStatter struct { + *repository +} + +func (ls *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { u, err := ls.ub.BuildBlobURL(ls.name, dgst) if err != nil { return distribution.Descriptor{}, err diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 514f3ee2..f0f40316 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -237,7 +237,7 @@ func TestBlobUploadChunked(t *testing.T) { } l := r.Blobs(ctx) - upload, err := l.Writer(ctx) + upload, err := l.Create(ctx) if err != nil { t.Fatal(err) } @@ -348,7 +348,7 @@ func TestBlobUploadMonolithic(t *testing.T) { } l := r.Blobs(ctx) - upload, err := l.Writer(ctx) + upload, err := l.Create(ctx) if err != nil { t.Fatal(err) } From e0e13209d84f84ce1af6da47fc1a2198a2f6fe35 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 16:34:00 -0700 Subject: [PATCH 123/501] Remove unused and duplicate error types Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/blob_writer.go | 2 +- docs/client/blob_writer_test.go | 6 ++---- docs/client/errors.go | 28 ---------------------------- 3 files changed, 3 insertions(+), 33 deletions(-) diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 44151167..06ca8738 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -28,7 +28,7 @@ type httpBlobUpload struct { func (hbu 
*httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { - return &BlobUploadNotFoundError{Location: hbu.location} + return distribution.ErrBlobUploadUnknown } return handleErrorResponse(resp) } diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 2e4edc45..0cc20da4 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -151,10 +151,8 @@ func TestUploadReadFrom(t *testing.T) { if err == nil { t.Fatalf("Expected error when not found") } - if blobErr, ok := err.(*BlobUploadNotFoundError); !ok { - t.Fatalf("Wrong error type %T: %s", err, err) - } else if expected := e + locationPath; blobErr.Location != expected { - t.Fatalf("Unexpected location: %s, expected %s", blobErr.Location, expected) + if err != distribution.ErrBlobUploadUnknown { + t.Fatalf("Wrong error thrown: %s, expected", err, distribution.ErrBlobUploadUnknown) } // 400 valid json diff --git a/docs/client/errors.go b/docs/client/errors.go index 2bb64a44..c4296fa3 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -9,34 +9,6 @@ import ( "github.com/docker/distribution/registry/api/v2" ) -// BlobUploadNotFoundError is returned when making a blob upload operation against an -// invalid blob upload location url. -// This may be the result of using a cancelled, completed, or stale upload -// location. -type BlobUploadNotFoundError struct { - Location string -} - -func (e *BlobUploadNotFoundError) Error() string { - return fmt.Sprintf("No blob upload found at Location: %s", e.Location) -} - -// BlobUploadInvalidRangeError is returned when attempting to upload an image -// blob chunk that is out of order. -// This provides the known BlobSize and LastValidRange which can be used to -// resume the upload. -type BlobUploadInvalidRangeError struct { - Location string - LastValidRange int - BlobSize int -} - -func (e *BlobUploadInvalidRangeError) Error() string { - return fmt.Sprintf( - "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", - e.Location, e.LastValidRange, e.BlobSize) -} - // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. 
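// With the dedicated error types above removed, callers now distinguish
// failures by sentinel comparison and type assertion. A hedged sketch, not
// part of this patch; it assumes only distribution.ErrBlobUploadUnknown and
// the Status field of UnexpectedHTTPStatusError (defined just below), as
// exercised by the tests:
func describeClientError(err error) string {
	if err == distribution.ErrBlobUploadUnknown {
		return "upload location is unknown (cancelled, completed, or stale)"
	}
	if statusErr, ok := err.(*UnexpectedHTTPStatusError); ok {
		return "unexpected registry status: " + statusErr.Status
	}
	return err.Error()
}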
type UnexpectedHTTPStatusError struct { From 131b608aeb1d9fb3d2b88cc5f81fbdd99433ca84 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 16:50:17 -0700 Subject: [PATCH 124/501] Create client transport package Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/repository.go | 9 +++++---- docs/client/{ => transport}/authchallenge.go | 2 +- docs/client/{ => transport}/authchallenge_test.go | 2 +- docs/client/{ => transport}/http_reader.go | 2 +- docs/client/{ => transport}/session.go | 2 +- docs/client/{ => transport}/session_test.go | 8 +++++++- docs/client/{ => transport}/transport.go | 2 +- 7 files changed, 17 insertions(+), 10 deletions(-) rename docs/client/{ => transport}/authchallenge.go (99%) rename docs/client/{ => transport}/authchallenge_test.go (98%) rename docs/client/{ => transport}/http_reader.go (99%) rename docs/client/{ => transport}/session.go (99%) rename docs/client/{ => transport}/session_test.go (97%) rename docs/client/{ => transport}/transport.go (99%) diff --git a/docs/client/repository.go b/docs/client/repository.go index 61dcf0f4..788e7904 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -18,16 +18,17 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" ) -// NewRepository creates a new Repository for the given repository name and endpoint -func NewRepository(ctx context.Context, name, endpoint string, transport http.RoundTripper) (distribution.Repository, error) { +// NewRepository creates a new Repository for the given repository name and base URL +func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { if err := v2.ValidateRespositoryName(name); err != nil { return nil, err } - ub, err := v2.NewURLBuilderFromString(endpoint) + ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err } @@ -292,7 +293,7 @@ func (ls *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea return nil, err } - return NewHTTPReadSeeker(ls.repository.client, blobURL, stat.Length), nil + return transport.NewHTTPReadSeeker(ls.repository.client, blobURL, stat.Length), nil } func (ls *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { diff --git a/docs/client/authchallenge.go b/docs/client/transport/authchallenge.go similarity index 99% rename from docs/client/authchallenge.go rename to docs/client/transport/authchallenge.go index 49cf270e..fffd560b 100644 --- a/docs/client/authchallenge.go +++ b/docs/client/transport/authchallenge.go @@ -1,4 +1,4 @@ -package client +package transport import ( "net/http" diff --git a/docs/client/authchallenge_test.go b/docs/client/transport/authchallenge_test.go similarity index 98% rename from docs/client/authchallenge_test.go rename to docs/client/transport/authchallenge_test.go index 802c94f3..45c932b9 100644 --- a/docs/client/authchallenge_test.go +++ b/docs/client/transport/authchallenge_test.go @@ -1,4 +1,4 @@ -package client +package transport import ( "net/http" diff --git a/docs/client/http_reader.go b/docs/client/transport/http_reader.go similarity index 99% rename from docs/client/http_reader.go rename to docs/client/transport/http_reader.go index 22f9bfbc..de728a96 100644 --- a/docs/client/http_reader.go +++ 
b/docs/client/transport/http_reader.go @@ -1,4 +1,4 @@ -package client +package transport import ( "bufio" diff --git a/docs/client/session.go b/docs/client/transport/session.go similarity index 99% rename from docs/client/session.go rename to docs/client/transport/session.go index 41bb4f31..670be1ba 100644 --- a/docs/client/session.go +++ b/docs/client/transport/session.go @@ -1,4 +1,4 @@ -package client +package transport import ( "encoding/json" diff --git a/docs/client/session_test.go b/docs/client/transport/session_test.go similarity index 97% rename from docs/client/session_test.go rename to docs/client/transport/session_test.go index cf8e546e..374d6e79 100644 --- a/docs/client/session_test.go +++ b/docs/client/transport/session_test.go @@ -1,4 +1,4 @@ -package client +package transport import ( "encoding/base64" @@ -11,6 +11,12 @@ import ( "github.com/docker/distribution/testutil" ) +func testServer(rrm testutil.RequestResponseMap) (string, func()) { + h := testutil.NewHandler(rrm) + s := httptest.NewServer(h) + return s.URL, s.Close +} + type testAuthenticationWrapper struct { headers http.Header authCheck func(string) bool diff --git a/docs/client/transport.go b/docs/client/transport/transport.go similarity index 99% rename from docs/client/transport.go rename to docs/client/transport/transport.go index 0b241619..c8cfbb19 100644 --- a/docs/client/transport.go +++ b/docs/client/transport/transport.go @@ -1,4 +1,4 @@ -package client +package transport import ( "io" From eb2ac4301f26b6031c7aad48657a5ac30adca8a4 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 15 May 2015 17:37:32 -0700 Subject: [PATCH 125/501] Lint and documentation fixes Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/blob_writer_test.go | 2 +- docs/client/transport/http_reader.go | 3 +++ docs/client/transport/transport.go | 6 ++++++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 0cc20da4..4d2ae862 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -152,7 +152,7 @@ func TestUploadReadFrom(t *testing.T) { t.Fatalf("Expected error when not found") } if err != distribution.ErrBlobUploadUnknown { - t.Fatalf("Wrong error thrown: %s, expected", err, distribution.ErrBlobUploadUnknown) + t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) } // 400 valid json diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go index de728a96..d10d37e0 100644 --- a/docs/client/transport/http_reader.go +++ b/docs/client/transport/http_reader.go @@ -13,6 +13,9 @@ import ( "github.com/docker/distribution" ) +// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET +// request. When seeking and starting a read from a non-zero offset +// the a "Range" header will be added which sets the offset. func NewHTTPReadSeeker(client *http.Client, url string, size int64) distribution.ReadSeekCloser { return &httpReadSeeker{ client: client, diff --git a/docs/client/transport/transport.go b/docs/client/transport/transport.go index c8cfbb19..30e45fab 100644 --- a/docs/client/transport/transport.go +++ b/docs/client/transport/transport.go @@ -6,12 +6,16 @@ import ( "sync" ) +// RequestModifier represents an object which will do an inplace +// modification of an HTTP request. 
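// The pieces in this file compose into an http.Client whose requests are
// rewritten before each round trip. A minimal usage sketch, assuming only
// the RequestModifier, NewHeaderRequestModifier, and NewTransport
// definitions that follow; the header value is a placeholder:
func newModifiedClient() *http.Client {
	header := http.Header{"User-Agent": []string{"distribution-client"}}
	return &http.Client{
		Transport: NewTransport(http.DefaultTransport, NewHeaderRequestModifier(header)),
	}
}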
type RequestModifier interface { ModifyRequest(*http.Request) error } type headerModifier http.Header +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request. func NewHeaderRequestModifier(header http.Header) RequestModifier { return headerModifier(header) } @@ -24,6 +28,8 @@ func (h headerModifier) ModifyRequest(req *http.Request) error { return nil } +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { return &transport{ Modifiers: modifiers, From ea39e348049393de7060b5a71063d971ebdaed5d Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 19 May 2015 13:25:08 -0400 Subject: [PATCH 126/501] Fix typo: respository->repository Signed-off-by: Jordan Liggitt --- docs/api/v2/names.go | 6 +++--- docs/api/v2/names_test.go | 2 +- docs/storage/cache/memory.go | 2 +- docs/storage/cache/redis.go | 2 +- docs/storage/registry.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go index e4a98861..19cb72a0 100644 --- a/docs/api/v2/names.go +++ b/docs/api/v2/names.go @@ -46,7 +46,7 @@ var ( // ErrRepositoryNameComponentShort is returned when a repository name // contains a component which is shorter than // RepositoryNameComponentMinLength - ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength) + ErrRepositoryNameComponentShort = fmt.Errorf("repository name component must be %v or more characters", RepositoryNameComponentMinLength) // ErrRepositoryNameMissingComponents is returned when a repository name // contains fewer than RepositoryNameMinComponents components @@ -61,7 +61,7 @@ var ( ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) ) -// ValidateRespositoryName ensures the repository name is valid for use in the +// ValidateRepositoryName ensures the repository name is valid for use in the // registry. This function accepts a superset of what might be accepted by // docker core or docker hub. If the name does not pass validation, an error, // describing the conditions, is returned. @@ -75,7 +75,7 @@ var ( // // The result of the production, known as the "namespace", should be limited // to 255 characters. 
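// A hedged usage sketch for the validation API in this file: filter a set
// of candidate repository names, reporting why any were rejected. The
// filterValidNames helper is illustrative, not part of the package:
func filterValidNames(candidates []string) []string {
	valid := candidates[:0]
	for _, name := range candidates {
		if err := ValidateRepositoryName(name); err != nil {
			fmt.Printf("skipping %q: %v\n", name, err)
			continue
		}
		valid = append(valid, name)
	}
	return valid
}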
-func ValidateRepositoryName(name string) error { if len(name) > RepositoryNameTotalLengthMax { return ErrRepositoryNameLong } diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index de6a168f..d1dd2b48 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -80,7 +80,7 @@ func TestRepositoryNameRegexp(t *testing.T) { t.Fail() } - if err := ValidateRespositoryName(testcase.input); err != testcase.err { + if err := ValidateRepositoryName(testcase.input); err != testcase.err { if testcase.err != nil { if err != nil { failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) diff --git a/docs/storage/cache/memory.go b/docs/storage/cache/memory.go index 40ab0d94..125c11fb 100644 --- a/docs/storage/cache/memory.go +++ b/docs/storage/cache/memory.go @@ -25,7 +25,7 @@ func NewInMemoryBlobDescriptorCacheProvider() BlobDescriptorCacheProvider { } func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if err := v2.ValidateRespositoryName(repo); err != nil { + if err := v2.ValidateRepositoryName(repo); err != nil { return nil, err } diff --git a/docs/storage/cache/redis.go b/docs/storage/cache/redis.go index c0e542bc..1f3727f0 100644 --- a/docs/storage/cache/redis.go +++ b/docs/storage/cache/redis.go @@ -43,7 +43,7 @@ func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) BlobDescriptorCachePr // RepositoryScoped returns the scoped cache. func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if err := v2.ValidateRespositoryName(repo); err != nil { + if err := v2.ValidateRepositoryName(repo); err != nil { return nil, err } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 659c789e..331aba73 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -62,7 +62,7 @@ func (reg *registry) Scope() distribution.Scope { // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - if err := v2.ValidateRespositoryName(name); err != nil { + if err := v2.ValidateRepositoryName(name); err != nil { return nil, distribution.ErrRepositoryNameInvalid{ Name: name, Reason: err, From 2c1a83f940ba34c7feab1a04882019413db02584 Mon Sep 17 00:00:00 2001 From: Vincent Giersch Date: Thu, 23 Apr 2015 16:13:52 +0000 Subject: [PATCH 127/501] Storage Driver: Ceph Object Storage (RADOS) This driver implements the storagedriver.StorageDriver interface and uses Ceph Object Storage as its storage backend. Since RADOS is an object store with no notion of hierarchy, the following convention is used to keep the filesystem notions stored in this backend: * All object data is stored under opaque, prefixed UUID names (e.g. "blob:d3d232ff-ab3a-4046-9ab7-930228d4c164"). * All hierarchy information is stored in rados omaps, where the omap object identifier is the virtual directory name, the keys in a specific omap are the relative filenames, and the values are the blob object identifiers (or an empty value for a subdirectory). For example,
for the following hierarchy: /directory1 /directory1/object1 /directory1/object2 /directory1/directory2/object3 the omap "/directory1" will contain the following key/value pairs: - "object1" "blob:d3d232ff-ab3a-4046-9ab7-930228d4c164" - "object2" "blob:db2e359d-4af0-4bfb-ba1d-d2fd029866a0" - "directory2" "" and the omap "/directory1/directory2" will contain: - "object3" "blob:9ae2371c-81fc-4945-80ac-8bf7f566a5d9" * The MOVE is implemented by changing the reference to a specific blob in its parent virtual directory omap. This driver stripes rados objects to a fixed size (e.g. 4M). The idea is to keep small objects (as done by RBD on top of RADOS) that will be easily synchronized across OSDs. The information about the original object (i.e. the total size of its chunks) is stored as an xattr in the first chunk object. Signed-off-by: Vincent Giersch --- docs/storage/driver/rados/rados.go | 628 ++++++++++++++++++++++++ docs/storage/driver/rados/rados_test.go | 38 ++ 2 files changed, 666 insertions(+) create mode 100644 docs/storage/driver/rados/rados.go create mode 100644 docs/storage/driver/rados/rados_test.go diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go new file mode 100644 index 00000000..999b06b0 --- /dev/null +++ b/docs/storage/driver/rados/rados.go @@ -0,0 +1,628 @@ +package rados + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "path" + "strconv" + + "code.google.com/p/go-uuid/uuid" + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/noahdesu/go-ceph/rados" +) + +const driverName = "rados" + +// Prefix for all stored blobs +const objectBlobPrefix = "blob:" + +// Objects are striped to a fixed 4M chunk size +const defaultChunkSize = 4 << 20 +const defaultXattrTotalSizeName = "total-size" + +// Max number of keys fetched from omap at each read operation +const defaultKeysFetched = 1 + +// DriverParameters encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + poolname string + username string + chunksize uint64 +} + +func init() { + factory.Register(driverName, &radosDriverFactory{}) +} + +// radosDriverFactory implements the factory.StorageDriverFactory interface +type radosDriverFactory struct{} + +func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn *rados.Conn + Ioctx *rados.IOContext + chunksize uint64 +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS. +// Objects are stored at absolute keys in the provided pool. 
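// A hedged construction sketch using FromParameters (defined below). The
// pool name and user name here are placeholders; only poolname is required,
// and chunksize must be passed as a uint64:
func newRegistryDriver() (*Driver, error) {
	return FromParameters(map[string]interface{}{
		"poolname":  "docker-registry",
		"username":  "client.registry",
		"chunksize": uint64(4 << 20),
	})
}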
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - poolname: the ceph pool name +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + + pool, ok := parameters["poolname"] + if !ok { + return nil, fmt.Errorf("No poolname parameter provided") + } + + username, ok := parameters["username"] + if !ok { + username = "" + } + + chunksize := uint64(defaultChunkSize) + chunksizeParam, ok := parameters["chunksize"] + if ok { + chunksize, ok = chunksizeParam.(uint64) + if !ok { + return nil, fmt.Errorf("The chunksize parameter should be a number") + } + } + + params := DriverParameters{ + fmt.Sprint(pool), + fmt.Sprint(username), + chunksize, + } + + return New(params) +} + +// New constructs a new Driver +func New(params DriverParameters) (*Driver, error) { + var conn *rados.Conn + var err error + + if params.username != "" { + log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) + conn, err = rados.NewConnWithUser(params.username) + } else { + log.Infof("Opening connection to pool %s", params.poolname) + conn, err = rados.NewConn() + } + + if err != nil { + return nil, err + } + + err = conn.ReadDefaultConfigFile() + if err != nil { + return nil, err + } + + err = conn.Connect() + if err != nil { + return nil, err + } + + log.Infof("Connected") + + ioctx, err := conn.OpenIOContext(params.poolname) + + log.Infof("Connected to pool %s", params.poolname) + + if err != nil { + return nil, err + } + + d := &driver{ + Ioctx: ioctx, + Conn: conn, + chunksize: params.chunksize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { + return err + } + + return nil +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. 
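// A usage sketch for ReadStream, assuming the StorageDriver interface of
// this era exposes it with this signature (as GetContent above suggests);
// the path is illustrative:
func readFrom(ctx context.Context, d storagedriver.StorageDriver, offset int64) ([]byte, error) {
	rc, err := d.ReadStream(ctx, "/directory1/object1", offset)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}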
+type readStreamReader struct { + driver *driver + oid string + size uint64 + offset uint64 +} + +func (r *readStreamReader) Read(b []byte) (n int, err error) { + // Determine the part available to read + bufferOffset := uint64(0) + bufferSize := uint64(len(b)) + + // End of the object, read less than the buffer size + if bufferSize > r.size-r.offset { + bufferSize = r.size - r.offset + } + + // Fill `b` + for bufferOffset < bufferSize { + // Get the offset in the object chunk + chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset) + + // Determine the best size to read + bufferEndOffset := bufferSize + if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset { + bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset) + } + + // Read the chunk + n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset) + + if err != nil { + return int(bufferOffset), err + } + + bufferOffset += uint64(n) + r.offset += uint64(n) + } + + // EOF if the offset is at the end of the object + if r.offset == r.size { + return int(bufferOffset), io.EOF + } + + return int(bufferOffset), nil +} + +func (r *readStreamReader) Close() error { + return nil +} + +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + // get oid from filename + oid, err := d.getOid(path) + + if err != nil { + return nil, err + } + + // get object stat + stat, err := d.Stat(ctx, path) + + if err != nil { + return nil, err + } + + if offset > stat.Size() { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + return &readStreamReader{ + driver: d, + oid: oid, + size: uint64(stat.Size()), + offset: uint64(offset), + }, nil +} + +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + buf := make([]byte, d.chunksize) + totalRead = 0 + + oid, err := d.getOid(path) + if err != nil { + switch err.(type) { + // Trying to write new object, generate new blob identifier for it + case storagedriver.PathNotFoundError: + oid = d.generateOid() + err = d.putOid(path, oid) + if err != nil { + return 0, err + } + default: + return 0, err + } + } else { + // Check total object size only for existing ones + totalSize, err := d.getXattrTotalSize(ctx, oid) + if err != nil { + return 0, err + } + + // If offset if after the current object size, fill the gap with zeros + for totalSize < uint64(offset) { + sizeToWrite := d.chunksize + if totalSize-uint64(offset) < sizeToWrite { + sizeToWrite = totalSize - uint64(offset) + } + + chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize)) + err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset)) + if err != nil { + return totalRead, err + } + + totalSize += sizeToWrite + } + } + + // Writer + for { + // Align to chunk size + sizeRead := uint64(0) + sizeToRead := uint64(offset+totalRead) % d.chunksize + if sizeToRead == 0 { + sizeToRead = d.chunksize + } + + // Read from `reader` + for sizeRead < sizeToRead { + nn, err := reader.Read(buf[sizeRead:sizeToRead]) + sizeRead += uint64(nn) + + if err != nil { + if err != io.EOF { + return totalRead, err + } + + break + } + } + + // End of file and nothing was read + if sizeRead == 0 { + break + } + + // Write chunk object + chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead)) + err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset)) + + if err != nil { + return totalRead, err + } 
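// Worked example of the chunk addressing the write above relies on
// (implemented by getChunkNameFromOffset further below), assuming the
// default 4M chunk size: a byte offset of 9<<20 into an object with oid
// "blob:x" resolves to
//
//	chunkID     = (9<<20) / (4<<20) = 2      -> chunk object "blob:x-2"
//	chunkOffset = (9<<20) % (4<<20) = 1<<20  -> byte 1M within that chunk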
+ + // Update total object size as xattr in the first chunk of the object + err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) + if err != nil { + return totalRead, err + } + + totalRead += int64(sizeRead) + + // End of file + if sizeRead < sizeToRead { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + // get oid from filename + oid, err := d.getOid(path) + + if err != nil { + return nil, err + } + + // the path is a virtual directory? + if oid == "" { + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: 0, + IsDir: true, + }, + }, nil + } + + // stat first chunk + stat, err := d.Ioctx.Stat(oid + "-0") + + if err != nil { + return nil, err + } + + // get total size of chunked object + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return nil, err + } + + return storagedriver.FileInfoInternal{ + FileInfoFields: storagedriver.FileInfoFields{ + Path: path, + Size: int64(totalSize), + ModTime: stat.ModTime, + }, + }, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { + files, err := d.listDirectoryOid(dirPath) + + if err != nil { + return nil, err + } + + keys := make([]string, 0, len(files)) + for k := range files { + keys = append(keys, path.Join(dirPath, k)) + } + + return keys, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + // Get oid + oid, err := d.getOid(sourcePath) + + if err != nil { + return err + } + + // Move reference + err = d.putOid(destPath, oid) + + if err != nil { + return err + } + + // Delete old reference + err = d.deleteOid(sourcePath) + + if err != nil { + return err + } + + return nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, objectPath string) error { + // Get oid + oid, err := d.getOid(objectPath) + + if err != nil { + return err + } + + // Deleting virtual directory + if oid == "" { + objects, err := d.listDirectoryOid(objectPath) + if err != nil { + return err + } + + for object := range objects { + err = d.Delete(ctx, path.Join(objectPath, object)) + if err != nil { + return err + } + } + } else { + // Delete object chunks + totalSize, err := d.getXattrTotalSize(ctx, oid) + + if err != nil { + return err + } + + for offset := uint64(0); offset < totalSize; offset += d.chunksize { + chunkName, _ := d.getChunkNameFromOffset(oid, offset) + + err = d.Ioctx.Delete(chunkName) + if err != nil { + return err + } + } + + // Delete reference + err = d.deleteOid(objectPath) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
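// Stepping back from the I/O path: the reference bookkeeping maintained by
// putOid/getOid/deleteOid below lays out omaps as described in the commit
// message. A hedged restatement for a fresh pool after storing
// /directory1/object1 (the uuid is a placeholder):
//
//	omap "/directory1": "object1"    -> "blob:<uuid>"
//	omap "/":           "directory1" -> ""
//
// Virtual directories are thus omap objects whose keys are their children;
// empty values mark subdirectories.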
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +// Generate a blob identifier +func (d *driver) generateOid() string { + return objectBlobPrefix + uuid.New() +} + +// Reference a object and its hierarchy +func (d *driver) putOid(objectPath string, oid string) error { + directory := path.Dir(objectPath) + base := path.Base(objectPath) + createParentReference := true + + // After creating this reference, skip the parents referencing since the + // hierarchy already exists + if oid == "" { + firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) + if (err == nil) && (len(firstReference) > 0) { + createParentReference = false + } + } + + oids := map[string][]byte{ + base: []byte(oid), + } + + // Reference object + err := d.Ioctx.SetOmap(directory, oids) + if err != nil { + return err + } + + // Esure parent virtual directories + if createParentReference && directory != "/" { + return d.putOid(directory, "") + } + + return nil +} + +// Get the object identifier from an object name +func (d *driver) getOid(objectPath string) (string, error) { + directory := path.Dir(objectPath) + base := path.Base(objectPath) + + files, err := d.Ioctx.GetOmapValues(directory, "", base, 1) + + if (err != nil) || (files[base] == nil) { + return "", storagedriver.PathNotFoundError{Path: objectPath} + } + + return string(files[base]), nil +} + +// List the objects of a virtual directory +func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) { + return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched) +} + +// Remove a file from the files hierarchy +func (d *driver) deleteOid(objectPath string) error { + // Remove object reference + directory := path.Dir(objectPath) + base := path.Base(objectPath) + err := d.Ioctx.RmOmapKeys(directory, []string{base}) + + if err != nil { + return err + } + + // Remove virtual directory if empty (no more references) + firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) + + if err != nil { + return err + } + + if len(firstReference) == 0 { + // Delete omap + err := d.Ioctx.Delete(directory) + + if err != nil { + return err + } + + // Remove reference on parent omaps + if directory != "/" { + return d.deleteOid(directory) + } + } + + return nil +} + +// Takes an offset in an chunked object and return the chunk name and a new +// offset in this chunk object +func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) { + chunkID := offset / d.chunksize + chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10) + chunkedOffset := offset % d.chunksize + return chunkedOid, chunkedOffset +} + +// Set the total size of a chunked object `oid` +func (d *driver) setXattrTotalSize(oid string, size uint64) error { + // Convert uint64 `size` to []byte + xattr := make([]byte, binary.MaxVarintLen64) + binary.LittleEndian.PutUint64(xattr, size) + + // Save the total size as a xattr in the first chunk + return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) +} + +// Get the total size of the chunked object `oid` stored as xattr +func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) { + // Fetch xattr as []byte + xattr := make([]byte, binary.MaxVarintLen64) + xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) + + if err != nil { + return 0, err + } + + if xattrLength != len(xattr) { + context.GetLogger(ctx).Errorf("object %s 
xattr length mismatch: %d != %d", oid, xattrLength, len(xattr)) + return 0, storagedriver.PathNotFoundError{Path: oid} + } + + // Convert []byte as uint64 + totalSize := binary.LittleEndian.Uint64(xattr) + + return totalSize, nil +} diff --git a/docs/storage/driver/rados/rados_test.go b/docs/storage/driver/rados/rados_test.go new file mode 100644 index 00000000..29486e89 --- /dev/null +++ b/docs/storage/driver/rados/rados_test.go @@ -0,0 +1,38 @@ +package rados + +import ( + "os" + "testing" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +func init() { + poolname := os.Getenv("RADOS_POOL") + username := os.Getenv("RADOS_USER") + + driverConstructor := func() (storagedriver.StorageDriver, error) { + parameters := DriverParameters{ + poolname, + username, + defaultChunkSize, + } + + return New(parameters) + } + + skipCheck := func() string { + if poolname == "" { + return "RADOS_POOL must be set to run Rado tests" + } + return "" + } + + testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) +} From 8db2145b819626a0f78023de09aef22664544a80 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 19 May 2015 19:18:30 -0700 Subject: [PATCH 128/501] Feedback update Update comments and TODOs Fix switch style Updated parse http response to take in reader Add Cancel implementation Update blobstore variable name Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/blob_writer.go | 23 +++++++-- docs/client/blob_writer_test.go | 10 ---- docs/client/errors.go | 7 +-- docs/client/repository.go | 90 +++++++++++++++++---------------- 4 files changed, 68 insertions(+), 62 deletions(-) diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 06ca8738..55223520 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -2,7 +2,6 @@ package client import ( "bytes" - "errors" "fmt" "io" "io/ioutil" @@ -49,7 +48,6 @@ func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { return 0, hbu.handleErrorResponse(resp) } - // TODO(dmcgowan): Validate headers hbu.uuid = resp.Header.Get("Docker-Upload-UUID") hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { @@ -85,7 +83,6 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { return 0, hbu.handleErrorResponse(resp) } - // TODO(dmcgowan): Validate headers hbu.uuid = resp.Header.Get("Docker-Upload-UUID") hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { @@ -110,7 +107,7 @@ func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: - return newOffset, errors.New("Cannot seek from end on incomplete upload") + newOffset += int64(offset) case os.SEEK_SET: newOffset = int64(offset) } @@ -143,6 +140,7 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip if err != nil { return distribution.Descriptor{}, err } + defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { return distribution.Descriptor{}, hbu.handleErrorResponse(resp) @@ -152,7 +150,22 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip } func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - panic("not implemented") + req, err := http.NewRequest("DELETE", 
hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusNoContent, http.StatusNotFound: + return nil + default: + return hbu.handleErrorResponse(resp) + } } func (hbu *httpBlobUpload) Close() error { diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 4d2ae862..674d6e01 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -205,13 +205,3 @@ func TestUploadReadFrom(t *testing.T) { t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) } } - -//repo distribution.Repository -//client *http.Client - -//uuid string -//startedAt time.Time - -//location string // always the last value of the location header. -//offset int64 -//closed bool diff --git a/docs/client/errors.go b/docs/client/errors.go index c4296fa3..c6c802a2 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -3,6 +3,7 @@ package client import ( "encoding/json" "fmt" + "io" "io/ioutil" "net/http" @@ -34,9 +35,9 @@ func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), shortenedResponse) } -func parseHTTPErrorResponse(response *http.Response) error { +func parseHTTPErrorResponse(r io.Reader) error { var errors v2.Errors - body, err := ioutil.ReadAll(response.Body) + body, err := ioutil.ReadAll(r) if err != nil { return err } @@ -52,7 +53,7 @@ func parseHTTPErrorResponse(response *http.Response) error { func handleErrorResponse(resp *http.Response) error { if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp) + return parseHTTPErrorResponse(resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } diff --git a/docs/client/repository.go b/docs/client/repository.go index 788e7904..123ef6ce 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -11,12 +11,10 @@ import ( "strconv" "time" - "github.com/docker/distribution/manifest" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" @@ -108,8 +106,8 @@ func (ms *manifests) Tags() ([]string, error) { } defer resp.Body.Close() - switch { - case resp.StatusCode == http.StatusOK: + switch resp.StatusCode { + case http.StatusOK: b, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err @@ -123,7 +121,7 @@ func (ms *manifests) Tags() ([]string, error) { } return tagsResponse.Tags, nil - case resp.StatusCode == http.StatusNotFound: + case http.StatusNotFound: return nil, nil default: return nil, handleErrorResponse(resp) @@ -131,6 +129,8 @@ func (ms *manifests) Tags() ([]string, error) { } func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { + // Call by Tag endpoint since the API uses the same + // URL endpoint for tags and digests. 
return ms.ExistsByTag(dgst.String()) } @@ -145,10 +145,10 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { return false, err } - switch { - case resp.StatusCode == http.StatusOK: + switch resp.StatusCode { + case http.StatusOK: return true, nil - case resp.StatusCode == http.StatusNotFound: + case http.StatusNotFound: return false, nil default: return false, handleErrorResponse(resp) @@ -156,6 +156,8 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { } func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + // Call by Tag endpoint since the API uses the same + // URL endpoint for tags and digests. return ms.GetByTag(dgst.String()) } @@ -171,8 +173,8 @@ func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) { } defer resp.Body.Close() - switch { - case resp.StatusCode == http.StatusOK: + switch resp.StatusCode { + case http.StatusOK: var sm manifest.SignedManifest decoder := json.NewDecoder(resp.Body) @@ -203,9 +205,9 @@ func (ms *manifests) Put(m *manifest.SignedManifest) error { } defer resp.Body.Close() - switch { - case resp.StatusCode == http.StatusAccepted: - // TODO(dmcgowan): Use or check digest header + switch resp.StatusCode { + case http.StatusAccepted: + // TODO(dmcgowan): make use of digest header return nil default: return handleErrorResponse(resp) @@ -228,8 +230,8 @@ func (ms *manifests) Delete(dgst digest.Digest) error { } defer resp.Body.Close() - switch { - case resp.StatusCode == http.StatusOK: + switch resp.StatusCode { + case http.StatusOK: return nil default: return handleErrorResponse(resp) @@ -263,17 +265,17 @@ func sanitizeLocation(location, source string) (string, error) { return location, nil } -func (ls *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return ls.statter.Stat(ctx, dgst) +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) } -func (ls *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - desc, err := ls.Stat(ctx, dgst) +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + desc, err := bs.Stat(ctx, dgst) if err != nil { return nil, err } - reader, err := ls.Open(ctx, desc.Digest) + reader, err := bs.Open(ctx, desc.Digest) if err != nil { return nil, err } @@ -282,26 +284,26 @@ func (ls *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { return ioutil.ReadAll(reader) } -func (ls *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - stat, err := ls.statter.Stat(ctx, dgst) +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + stat, err := bs.statter.Stat(ctx, dgst) if err != nil { return nil, err } - blobURL, err := ls.ub.BuildBlobURL(ls.Name(), stat.Digest) + blobURL, err := bs.ub.BuildBlobURL(bs.Name(), stat.Digest) if err != nil { return nil, err } - return transport.NewHTTPReadSeeker(ls.repository.client, blobURL, stat.Length), nil + return transport.NewHTTPReadSeeker(bs.repository.client, blobURL, stat.Length), nil } -func (ls *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - return nil +func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") } -func (ls *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - 
writer, err := ls.Create(ctx) +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) if err != nil { return distribution.Descriptor{}, err } @@ -323,17 +325,17 @@ func (ls *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } -func (ls *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { - u, err := ls.ub.BuildBlobUploadURL(ls.name) +func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { + u, err := bs.ub.BuildBlobUploadURL(bs.name) - resp, err := ls.client.Post(u, "", nil) + resp, err := bs.client.Post(u, "", nil) if err != nil { return nil, err } defer resp.Body.Close() - switch { - case resp.StatusCode == http.StatusAccepted: + switch resp.StatusCode { + case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) @@ -342,8 +344,8 @@ func (ls *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } return &httpBlobUpload{ - repo: ls.repository, - client: ls.client, + repo: bs.repository, + client: bs.client, uuid: uuid, startedAt: time.Now(), location: location, @@ -353,7 +355,7 @@ func (ls *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } } -func (ls *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } @@ -361,20 +363,20 @@ type blobStatter struct { *repository } -func (ls *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - u, err := ls.ub.BuildBlobURL(ls.name, dgst) +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + u, err := bs.ub.BuildBlobURL(bs.name, dgst) if err != nil { return distribution.Descriptor{}, err } - resp, err := ls.client.Head(u) + resp, err := bs.client.Head(u) if err != nil { return distribution.Descriptor{}, err } defer resp.Body.Close() - switch { - case resp.StatusCode == http.StatusOK: + switch resp.StatusCode { + case http.StatusOK: lengthHeader := resp.Header.Get("Content-Length") length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { @@ -386,7 +388,7 @@ func (ls *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi Length: length, Digest: dgst, }, nil - case resp.StatusCode == http.StatusNotFound: + case http.StatusNotFound: return distribution.Descriptor{}, distribution.ErrBlobUnknown default: return distribution.Descriptor{}, handleErrorResponse(resp) From b4972a6bab3965ecd186c392d058f7bb43fd8e7a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 19 May 2015 19:56:27 -0700 Subject: [PATCH 129/501] Break down type dependencies Each type no longer requires holding a reference to repository. Added implementation for signatures get. 
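To make the shape of this refactor concrete, a minimal, self-contained Go
sketch of the pattern follows; manifestGetter and fakeManifests are
illustrative stand-ins, not the real distribution types:

    package main

    import "fmt"

    // manifestGetter stands in for the narrow slice of
    // distribution.ManifestService that the signatures service needs.
    type manifestGetter interface {
    	GetSignatures(dgst string) ([][]byte, error)
    }

    // signatures depends only on the small interface above rather than
    // embedding a whole *repository, so it can be built and tested alone.
    type signatures struct {
    	manifests manifestGetter
    }

    func (s *signatures) Get(dgst string) ([][]byte, error) {
    	return s.manifests.GetSignatures(dgst)
    }

    // fakeManifests exercises the service without any HTTP plumbing.
    type fakeManifests struct{}

    func (fakeManifests) GetSignatures(dgst string) ([][]byte, error) {
    	return [][]byte{[]byte("sig-for-" + dgst)}, nil
    }

    func main() {
    	s := &signatures{manifests: fakeManifests{}}
    	sigs, _ := s.Get("sha256:abc")
    	fmt.Printf("%s\n", sigs[0])
    }

The same narrowing is applied below to manifests, blobs and blobStatter,
each of which now carries only a name, a URL builder and an HTTP client.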
Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/blob_writer.go | 6 +++--- docs/client/repository.go | 42 ++++++++++++++++++++++++++------------ 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 55223520..9ebd4183 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -14,8 +14,8 @@ import ( ) type httpBlobUpload struct { - repo distribution.Repository - client *http.Client + statter distribution.BlobStatter + client *http.Client uuid string startedAt time.Time @@ -146,7 +146,7 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip return distribution.Descriptor{}, hbu.handleErrorResponse(resp) } - return hbu.repo.Blobs(ctx).Stat(ctx, desc.Digest) + return hbu.statter.Stat(ctx, desc.Digest) } func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { diff --git a/docs/client/repository.go b/docs/client/repository.go index 123ef6ce..a1117ac2 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -58,32 +58,42 @@ func (r *repository) Name() string { func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ - repository: r, + name: r.Name(), + ub: r.ub, + client: r.client, } return &blobs{ - repository: r, - statter: cache.NewCachedBlobStatter(cache.NewInMemoryBlobDescriptorCacheProvider(), statter), + name: r.Name(), + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(cache.NewInMemoryBlobDescriptorCacheProvider(), statter), } } func (r *repository) Manifests() distribution.ManifestService { return &manifests{ - repository: r, + name: r.Name(), + ub: r.ub, + client: r.client, } } func (r *repository) Signatures() distribution.SignatureService { return &signatures{ - repository: r, + manifests: r.Manifests(), } } type signatures struct { - *repository + manifests distribution.ManifestService } func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { - panic("not implemented") + m, err := s.manifests.Get(dgst) + if err != nil { + return nil, err + } + return m.Signatures() } func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { @@ -91,7 +101,9 @@ func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { } type manifests struct { - *repository + name string + ub *v2.URLBuilder + client *http.Client } func (ms *manifests) Tags() ([]string, error) { @@ -239,7 +251,9 @@ func (ms *manifests) Delete(dgst digest.Digest) error { } type blobs struct { - *repository + name string + ub *v2.URLBuilder + client *http.Client statter distribution.BlobStatter } @@ -290,12 +304,12 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea return nil, err } - blobURL, err := bs.ub.BuildBlobURL(bs.Name(), stat.Digest) + blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest) if err != nil { return nil, err } - return transport.NewHTTPReadSeeker(bs.repository.client, blobURL, stat.Length), nil + return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Length), nil } func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { @@ -344,7 +358,7 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } return &httpBlobUpload{ - repo: bs.repository, + statter: bs.statter, client: bs.client, uuid: uuid, startedAt: time.Now(), @@ -360,7 +374,9 @@ func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter } type blobStatter 
struct { - *repository + name string + ub *v2.URLBuilder + client *http.Client } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { From 3b5a2bbebcd1bdc0809232920a477e452ebf21a5 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 May 2015 10:05:44 -0700 Subject: [PATCH 130/501] Add unauthorized error check Add check for unauthorized error code and explicitly set the error code if the content could not be parsed. Updated repository test for unauthorized tests and nit feedback. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/errors.go | 11 +++++ docs/client/repository_test.go | 82 +++++++++++++++++++++------------- 2 files changed, 61 insertions(+), 32 deletions(-) diff --git a/docs/client/errors.go b/docs/client/errors.go index c6c802a2..e6ad5f51 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -52,6 +52,17 @@ func parseHTTPErrorResponse(r io.Reader) error { } func handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == 401 { + err := parseHTTPErrorResponse(resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { + return &v2.Error{ + Code: v2.ErrorCodeUnauthorized, + Message: "401 Unauthorized", + Detail: uErr.Response, + } + } + return err + } if resp.StatusCode >= 400 && resp.StatusCode < 500 { return parseHTTPErrorResponse(resp.Body) } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index f0f40316..9530bd37 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/testutil" ) @@ -73,26 +74,10 @@ func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.R }) } -func addPing(m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Headers: http.Header(map[string][]string{ - "Docker-Distribution-API-Version": {"registry/2.0"}, - }), - }, - }) -} - func TestBlobFetch(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addTestFetch("test.example.com/repo1", d1, b1, &m) - addPing(&m) e, c := testServer(m) defer c() @@ -112,14 +97,13 @@ func TestBlobFetch(t *testing.T) { t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) } - // TODO(dmcgowan): Test error cases + // TODO(dmcgowan): Test for unknown blob case } func TestBlobExists(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addTestFetch("test.example.com/repo1", d1, b1, &m) - addPing(&m) e, c := testServer(m) defer c() @@ -150,7 +134,6 @@ func TestBlobExists(t *testing.T) { func TestBlobUploadChunked(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap - addPing(&m) chunks := [][]byte{ b1[0:256], b1[256:512], @@ -272,7 +255,6 @@ func TestBlobUploadChunked(t *testing.T) { func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap - addPing(&m) repo := "test.example.com/uploadrepo" uploadID := uuid.New() m = append(m, testutil.RequestResponseMapping{ @@ -378,7 +360,7 @@ func TestBlobUploadMonolithic(t *testing.T) { } } -func newRandomSchema1Manifest(name, tag string, blobCount int) 
(*manifest.SignedManifest, digest.Digest) { +func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { blobs := make([]manifest.FSLayer, blobCount) history := make([]manifest.History, blobCount) @@ -474,9 +456,8 @@ func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { func TestManifestFetch(t *testing.T) { repo := "test.example.com/repo" - m1, dgst := newRandomSchema1Manifest(repo, "latest", 6) + m1, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addPing(&m) addTestManifest(repo, dgst.String(), m1.Raw, &m) e, c := testServer(m) @@ -507,9 +488,8 @@ func TestManifestFetch(t *testing.T) { func TestManifestFetchByTag(t *testing.T) { repo := "test.example.com/repo/by/tag" - m1, _ := newRandomSchema1Manifest(repo, "latest", 6) + m1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addPing(&m) addTestManifest(repo, "latest", m1.Raw, &m) e, c := testServer(m) @@ -540,10 +520,9 @@ func TestManifestFetchByTag(t *testing.T) { func TestManifestDelete(t *testing.T) { repo := "test.example.com/repo/delete" - _, dgst1 := newRandomSchema1Manifest(repo, "latest", 6) - _, dgst2 := newRandomSchema1Manifest(repo, "latest", 6) + _, dgst1 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addPing(&m) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", @@ -577,9 +556,8 @@ func TestManifestDelete(t *testing.T) { func TestManifestPut(t *testing.T) { repo := "test.example.com/repo/delete" - m1, dgst := newRandomSchema1Manifest(repo, "other", 6) + m1, dgst := newRandomSchemaV1Manifest(repo, "other", 6) var m testutil.RequestResponseMap - addPing(&m) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", @@ -608,7 +586,7 @@ func TestManifestPut(t *testing.T) { t.Fatal(err) } - // TODO(dmcgowan): Check for error cases + // TODO(dmcgowan): Check for invalid input error } func TestManifestTags(t *testing.T) { @@ -624,7 +602,6 @@ func TestManifestTags(t *testing.T) { } `)) var m testutil.RequestResponseMap - addPing(&m) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", @@ -661,3 +638,44 @@ func TestManifestTags(t *testing.T) { // TODO(dmcgowan): Check for error cases } + +func TestManifestUnauthorized(t *testing.T) { + repo := "test.example.com/repo" + _, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + var m testutil.RequestResponseMap + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusUnauthorized, + Body: []byte("garbage"), + }, + }) + + e, c := testServer(m) + defer c() + + r, err := NewRepository(context.Background(), repo, e, nil) + if err != nil { + t.Fatal(err) + } + ms := r.Manifests() + + _, err = ms.Get(dgst) + if err == nil { + t.Fatal("Expected error fetching manifest") + } + v2Err, ok := err.(*v2.Error) + if !ok { + t.Fatalf("Unexpected error type: %#v", err) + } + if v2Err.Code != v2.ErrorCodeUnauthorized { + t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) + } + if expected := "401 Unauthorized"; v2Err.Message != expected { + t.Fatalf("Unexpected message value: %s, expected %s", v2Err.Message, expected) + } +} From 7e4d5eafae5b0f56c539ac8a04c88b9f07d9823c Mon Sep 17 00:00:00 2001 From: Derek 
McGowan
Date: Wed, 20 May 2015 10:09:37 -0700
Subject: [PATCH 131/501] Update transport package to sever distribution
 dependency

The transport package no longer requires importing distribution for the
ReadSeekCloser; it now declares its own.

Added comments on the AuthenticationHandler in session.
Added a TODO on the HTTP seek reader to highlight that it does not really
belong in the client transport package.

Signed-off-by: Derek McGowan (github: dmcgowan)
---
 docs/client/transport/http_reader.go | 11 ++++++++---
 docs/client/transport/session.go     |  5 +++++
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go
index d10d37e0..e351bdfe 100644
--- a/docs/client/transport/http_reader.go
+++ b/docs/client/transport/http_reader.go
@@ -9,14 +9,19 @@ import (
 	"io/ioutil"
 	"net/http"
 	"os"
-
-	"github.com/docker/distribution"
 )
 
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
 // NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
 // request. When seeking and starting a read from a non-zero offset
 // the a "Range" header will be added which sets the offset.
-func NewHTTPReadSeeker(client *http.Client, url string, size int64) distribution.ReadSeekCloser {
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, size int64) ReadSeekCloser {
 	return &httpReadSeeker{
 		client: client,
 		url:    url,
diff --git a/docs/client/transport/session.go b/docs/client/transport/session.go
index 670be1ba..5086c021 100644
--- a/docs/client/transport/session.go
+++ b/docs/client/transport/session.go
@@ -14,7 +14,12 @@ import (
 // AuthenticationHandler is an interface for authorizing a request from
 // params from a "WWW-Authenicate" header for a single scheme.
 type AuthenticationHandler interface {
+	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
 	Scheme() string
+
+	// AuthorizeRequest adds the authorization header to a request (if needed)
+	// using the parameters from the "WWW-Authenticate" header. The parameter
+	// values depend on the scheme.
 	AuthorizeRequest(req *http.Request, params map[string]string) error
 }

From aac3ce46c7ba78740bc556fb732c131cc4e8887e Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Wed, 20 May 2015 13:35:23 -0700
Subject: [PATCH 132/501] Only do auth checks for endpoints starting with v2

Changes behavior so the ping does not happen when /v2/ appears anywhere in a
request path, but only when the path begins with it. This fixes attempts to
ping on redirected URLs.
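The tightened condition reads a little inverted in the diff below; as a
standalone sketch (isV2Endpoint is a hypothetical helper, and the real code
keeps the strings.Index form because it uses v2Root afterwards):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // isV2Endpoint reports whether the request path *begins* with "/v2/",
    // rather than merely containing it somewhere, as redirected blob URLs can.
    func isV2Endpoint(path string) bool {
    	v2Root := strings.Index(path, "/v2/")
    	return !(v2Root == -1 || v2Root > 0) // equivalent to v2Root == 0
    }

    func main() {
    	for _, p := range []string{
    		"/v2/library/ubuntu/manifests/latest", // true: a registry API path
    		"/storage/v2/some/redirected/blob",    // false: /v2/ not at the start
    		"/healthz",                            // false: no /v2/ at all
    	} {
    		fmt.Printf("%-40s %v\n", p, isV2Endpoint(p))
    	}
    }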
Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/transport/session.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/client/transport/session.go b/docs/client/transport/session.go index 5086c021..90c8082c 100644 --- a/docs/client/transport/session.go +++ b/docs/client/transport/session.go @@ -94,7 +94,9 @@ HeaderLoop: func (ta *tokenAuthorizer) ModifyRequest(req *http.Request) error { v2Root := strings.Index(req.URL.Path, "/v2/") - if v2Root == -1 { + // Test if /v2/ does not exist or not at beginning + // TODO(dmcgowan) support v2 endpoints which have a prefix before /v2/ + if v2Root == -1 || v2Root > 0 { return nil } From 754a8e80f258573b7104d5657dd357859db9356a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 20 May 2015 14:55:59 -0700 Subject: [PATCH 133/501] Remove error message shortening Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/errors.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/client/errors.go b/docs/client/errors.go index e6ad5f51..2638055d 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -28,11 +28,7 @@ type UnexpectedHTTPResponseError struct { } func (e *UnexpectedHTTPResponseError) Error() string { - shortenedResponse := string(e.Response) - if len(shortenedResponse) > 15 { - shortenedResponse = shortenedResponse[:12] + "..." - } - return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), shortenedResponse) + return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(r io.Reader) error { From cd5430916fdfb03dc03a9577534d538080305d9a Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Wed, 20 May 2015 21:06:13 -0400 Subject: [PATCH 134/501] client: fix a typo preventing compilation Signed-off-by: Tibor Vass --- docs/client/repository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index a1117ac2..180d6472 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -22,7 +22,7 @@ import ( // NewRepository creates a new Repository for the given repository name and base URL func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if err := v2.ValidateRespositoryName(name); err != nil { + if err := v2.ValidateRepositoryName(name); err != nil { return nil, err } From 812c8099a6761b93850dd8f185be79b7b498fe03 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 20 May 2015 17:12:40 -0700 Subject: [PATCH 135/501] Decouple redis dependency from blob descriptor cache Ensure that clients can use the blob descriptor cache provider without needing the redis package. 
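The practical effect on consumers is easiest to see from the import side; a
minimal sketch using the new package layout from this patch:

    package main

    import (
    	"log"

    	"github.com/docker/distribution/registry/storage/cache/memory"
    )

    func main() {
    	// Constructed from the leaf package; nothing in this program links
    	// the redis driver or the redigo client behind it.
    	provider := memory.NewInMemoryBlobDescriptorCacheProvider()

    	// Scope the cache to a repository, as the registry client does
    	// before wrapping it with cache.NewCachedBlobStatter.
    	scoped, err := provider.RepositoryScoped("library/ubuntu")
    	if err != nil {
    		log.Fatal(err)
    	}
    	_ = scoped
    }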
Signed-off-by: Stephen J Day --- docs/client/repository.go | 3 ++- docs/handlers/app.go | 7 +++--- docs/handlers/app_test.go | 4 ++-- docs/storage/blob_test.go | 8 +++---- docs/storage/cache/cache.go | 11 ++++------ docs/storage/cache/{ => memory}/memory.go | 11 +++++----- docs/storage/cache/memory/memory_test.go | 13 +++++++++++ docs/storage/cache/memory_test.go | 9 -------- docs/storage/cache/{ => redis}/redis.go | 22 +++++++++---------- docs/storage/cache/{ => redis}/redis_test.go | 5 +++-- .../storage/cache/{cache_test.go => suite.go} | 6 ++--- docs/storage/manifeststore_test.go | 4 ++-- 12 files changed, 53 insertions(+), 50 deletions(-) rename docs/storage/cache/{ => memory}/memory.go (93%) create mode 100644 docs/storage/cache/memory/memory_test.go delete mode 100644 docs/storage/cache/memory_test.go rename docs/storage/cache/{ => redis}/redis.go (93%) rename docs/storage/cache/{ => redis}/redis_test.go (88%) rename docs/storage/cache/{cache_test.go => suite.go} (95%) diff --git a/docs/client/repository.go b/docs/client/repository.go index 180d6472..d43ac0db 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" ) // NewRepository creates a new Repository for the given repository name and base URL @@ -66,7 +67,7 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { name: r.Name(), ub: r.ub, client: r.client, - statter: cache.NewCachedBlobStatter(cache.NewInMemoryBlobDescriptorCacheProvider(), statter), + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), } } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 22c0b6de..1d58e945 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -18,7 +18,8 @@ import ( registrymiddleware "github.com/docker/distribution/registry/middleware/registry" repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" + rediscache "github.com/docker/distribution/registry/storage/cache/redis" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" @@ -114,10 +115,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewRedisBlobDescriptorCacheProvider(app.redis)) + app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis)) ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, cache.NewInMemoryBlobDescriptorCacheProvider()) + app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider()) ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 
03ea0c9c..fd1c486c 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -13,7 +13,7 @@ import ( "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" - "github.com/docker/distribution/registry/storage/cache" + memorycache "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "golang.org/x/net/context" ) @@ -30,7 +30,7 @@ func TestAppDispatcher(t *testing.T) { Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()), + registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider()), } server := httptest.NewServer(app) router := v2.Router() diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 6843922a..114e686f 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -12,7 +12,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" ) @@ -35,7 +35,7 @@ func TestSimpleBlobUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -148,7 +148,7 @@ func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -252,7 +252,7 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/cache/cache.go b/docs/storage/cache/cache.go index e7471c27..79e6d9c8 100644 --- a/docs/storage/cache/cache.go +++ b/docs/storage/cache/cache.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/digest" ) // BlobDescriptorCacheProvider provides repository scoped @@ -17,12 +16,10 @@ type BlobDescriptorCacheProvider interface { RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) } -func validateDigest(dgst digest.Digest) error { - return dgst.Validate() -} - -func validateDescriptor(desc distribution.Descriptor) error { - if err := validateDigest(desc.Digest); err != nil { +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria 
for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { return err } diff --git a/docs/storage/cache/memory.go b/docs/storage/cache/memory/memory.go similarity index 93% rename from docs/storage/cache/memory.go rename to docs/storage/cache/memory/memory.go index 125c11fb..cdd9abe8 100644 --- a/docs/storage/cache/memory.go +++ b/docs/storage/cache/memory/memory.go @@ -1,4 +1,4 @@ -package cache +package memory import ( "sync" @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" ) type inMemoryBlobDescriptorCacheProvider struct { @@ -17,7 +18,7 @@ type inMemoryBlobDescriptorCacheProvider struct { // NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for // storing blob descriptor data. -func NewInMemoryBlobDescriptorCacheProvider() BlobDescriptorCacheProvider { +func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { return &inMemoryBlobDescriptorCacheProvider{ global: newMapBlobDescriptorCache(), repositories: make(map[string]*mapBlobDescriptorCache), @@ -117,7 +118,7 @@ func newMapBlobDescriptorCache() *mapBlobDescriptorCache { } func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := validateDigest(dgst); err != nil { + if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } @@ -133,11 +134,11 @@ func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest } func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := validateDigest(dgst); err != nil { + if err := dgst.Validate(); err != nil { return err } - if err := validateDescriptor(desc); err != nil { + if err := cache.ValidateDescriptor(desc); err != nil { return err } diff --git a/docs/storage/cache/memory/memory_test.go b/docs/storage/cache/memory/memory_test.go new file mode 100644 index 00000000..3bae7ccb --- /dev/null +++ b/docs/storage/cache/memory/memory_test.go @@ -0,0 +1,13 @@ +package memory + +import ( + "testing" + + "github.com/docker/distribution/registry/storage/cache" +) + +// TestInMemoryBlobInfoCache checks the in memory implementation is working +// correctly. +func TestInMemoryBlobInfoCache(t *testing.T) { + cache.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) +} diff --git a/docs/storage/cache/memory_test.go b/docs/storage/cache/memory_test.go deleted file mode 100644 index 9f2ce460..00000000 --- a/docs/storage/cache/memory_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package cache - -import "testing" - -// TestInMemoryBlobInfoCache checks the in memory implementation is working -// correctly. 
-func TestInMemoryBlobInfoCache(t *testing.T) { - checkBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) -} diff --git a/docs/storage/cache/redis.go b/docs/storage/cache/redis/redis.go similarity index 93% rename from docs/storage/cache/redis.go rename to docs/storage/cache/redis/redis.go index 1f3727f0..29bbe3bc 100644 --- a/docs/storage/cache/redis.go +++ b/docs/storage/cache/redis/redis.go @@ -1,13 +1,13 @@ -package cache +package redis import ( "fmt" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" ) @@ -31,11 +31,9 @@ type redisBlobDescriptorService struct { // request objects, we can change this to a connection. } -var _ BlobDescriptorCacheProvider = &redisBlobDescriptorService{} - // NewRedisBlobDescriptorCacheProvider returns a new redis-based // BlobDescriptorCacheProvider using the provided redis connection pool. -func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) BlobDescriptorCacheProvider { +func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { return &redisBlobDescriptorService{ pool: pool, } @@ -55,7 +53,7 @@ func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribut // Stat retrieves the descriptor data from the redis hash entry. func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := validateDigest(dgst); err != nil { + if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } @@ -89,11 +87,11 @@ func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Con // hash. A hash is used here since we may store unrelated fields about a layer // in the future. func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := validateDigest(dgst); err != nil { + if err := dgst.Validate(); err != nil { return err } - if err := validateDescriptor(desc); err != nil { + if err := cache.ValidateDescriptor(desc); err != nil { return err } @@ -134,7 +132,7 @@ var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorS // forwards the descriptor request to the global blob store. If the media type // differs for the repository, we override it. 
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := validateDigest(dgst); err != nil { + if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } @@ -170,11 +168,11 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Conte } func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := validateDigest(dgst); err != nil { + if err := dgst.Validate(); err != nil { return err } - if err := validateDescriptor(desc); err != nil { + if err := cache.ValidateDescriptor(desc); err != nil { return err } diff --git a/docs/storage/cache/redis_test.go b/docs/storage/cache/redis/redis_test.go similarity index 88% rename from docs/storage/cache/redis_test.go rename to docs/storage/cache/redis/redis_test.go index 65c2fd3a..ed6944a1 100644 --- a/docs/storage/cache/redis_test.go +++ b/docs/storage/cache/redis/redis_test.go @@ -1,4 +1,4 @@ -package cache +package redis import ( "flag" @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" ) @@ -46,5 +47,5 @@ func TestRedisBlobDescriptorCacheProvider(t *testing.T) { t.Fatalf("unexpected error flushing redis db: %v", err) } - checkBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) + cache.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } diff --git a/docs/storage/cache/cache_test.go b/docs/storage/cache/suite.go similarity index 95% rename from docs/storage/cache/cache_test.go rename to docs/storage/cache/suite.go index e923367a..ceefab97 100644 --- a/docs/storage/cache/cache_test.go +++ b/docs/storage/cache/suite.go @@ -8,10 +8,10 @@ import ( "github.com/docker/distribution/digest" ) -// checkBlobDescriptorCache takes a cache implementation through a common set +// CheckBlobDescriptorCache takes a cache implementation through a common set // of operations. If adding new tests, please add them here so new -// implementations get the benefit. -func checkBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) { +// implementations get the benefit. This should be used for unit tests. 
+func CheckBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) { ctx := context.Background() checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 59f174b3..3422985a 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -29,7 +29,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) repo, err := registry.Repository(ctx, name) if err != nil { From a0d242d9df4adc1820c70b03efd2269c68f65bf0 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 20 May 2015 23:44:08 -0700 Subject: [PATCH 136/501] Remove digest package's dependency on external sha implementation The change relies on a refactor of the upstream resumable sha256/sha512 package that opts to register implementations with the standard library. This allows the resumable support to be detected where it matters, avoiding unnecessary and complex code. It also ensures that consumers of the digest package don't need to depend on the forked sha implementations. We also get an optimization with this change. If the size of data written to a digester is the same as the file size, we check to see if the digest has been verified. This works if the blob is written and committed in a single request. 
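A small self-contained sketch of the register-then-detect pattern this
commit leans on; the resumer interface below is a stand-in for illustration,
not the real resumable.Hash:

    package main

    import (
    	"crypto"
    	_ "crypto/sha256" // init() registers SHA-256 with the crypto package
    	"fmt"
    )

    func main() {
    	// Consumers ask the registry for a hash rather than importing a
    	// concrete implementation; a resumable implementation registered
    	// the same way is picked up here transparently.
    	h := crypto.SHA256.New()
    	h.Write([]byte("hello"))
    	fmt.Printf("%x\n", h.Sum(nil))

    	// Optional capabilities are then detected by type assertion, the
    	// way the blob writer probes for resumable.Hash.
    	type resumer interface {
    		State() ([]byte, error)
    		Restore(state []byte) error
    	}
    	if _, ok := h.(resumer); !ok {
    		fmt.Println("hash is not resumable; falling back to a full rehash")
    	}
    }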
Signed-off-by: Stephen J Day --- docs/client/repository.go | 2 +- docs/handlers/api_test.go | 6 +- docs/storage/blobwriter.go | 275 +++++------------------- docs/storage/blobwriter_nonresumable.go | 13 +- docs/storage/blobwriter_resumable.go | 196 ++++++++++++++++- docs/storage/linkedblobstore.go | 3 +- 6 files changed, 269 insertions(+), 226 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 180d6472..cd93cd1a 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -322,7 +322,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return distribution.Descriptor{}, err } dgstr := digest.NewCanonicalDigester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr)) + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { return distribution.Descriptor{}, err } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 9b5027ba..1a41cfb8 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -214,7 +214,7 @@ func TestBlobAPI(t *testing.T) { layerFile.Seek(0, 0) canonicalDigester := digest.NewCanonicalDigester() - if _, err := io.Copy(canonicalDigester, layerFile); err != nil { + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } canonicalDigest := canonicalDigester.Digest() @@ -639,7 +639,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { digester := digest.NewCanonicalDigester() - resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, &digester)) + resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) } @@ -704,7 +704,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp digester := digest.NewCanonicalDigester() - req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester)) + req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) if err != nil { t.Fatalf("unexpected error creating new request: %v", err) } diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index a9a625b6..40841a5e 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -3,9 +3,6 @@ package storage import ( "fmt" "io" - "os" - "path" - "strconv" "time" "github.com/Sirupsen/logrus" @@ -15,14 +12,19 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) +var ( + errResumableDigestNotAvailable = fmt.Errorf("resumable digest not available") +) + // layerWriter is used to control the various aspects of resumable // layer upload. It implements the LayerUpload interface. 
type blobWriter struct { blobStore *linkedBlobStore - id string - startedAt time.Time - resumableDigester digest.ResumableDigester + id string + startedAt time.Time + digester digest.Digester + written int64 // track the contiguous write // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy // LayerUpload Interface @@ -82,33 +84,31 @@ func (bw *blobWriter) Cancel(ctx context.Context) error { } func (bw *blobWriter) Write(p []byte) (int, error) { - if bw.resumableDigester == nil { - return bw.bufferedFileWriter.Write(p) - } - // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. - if err := bw.resumeHashAt(bw.blobStore.ctx, bw.offset); err != nil { + if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { return 0, err } - return io.MultiWriter(&bw.bufferedFileWriter, bw.resumableDigester).Write(p) + n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) + bw.written += int64(n) + + return n, err } func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { - if bw.resumableDigester == nil { - return bw.bufferedFileWriter.ReadFrom(r) - } - // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. - if err := bw.resumeHashAt(bw.blobStore.ctx, bw.offset); err != nil { + if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { return 0, err } - return bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.resumableDigester)) + nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) + bw.written += nn + + return nn, err } func (bw *blobWriter) Close() error { @@ -116,10 +116,8 @@ func (bw *blobWriter) Close() error { return bw.err } - if bw.resumableDigester != nil { - if err := bw.storeHashState(bw.blobStore.ctx); err != nil { - return err - } + if err := bw.storeHashState(bw.blobStore.ctx); err != nil { + return err } return bw.bufferedFileWriter.Close() @@ -171,13 +169,11 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri desc.Length = bw.size } - if bw.resumableDigester != nil { - // Restore the hasher state to the end of the upload. - if err := bw.resumeHashAt(ctx, bw.size); err != nil { - return distribution.Descriptor{}, err - } + // TODO(stevvooe): This section is very meandering. Need to be broken down + // to be a lot more clear. - canonical = bw.resumableDigester.Digest() + if err := bw.resumeDigestAt(ctx, bw.size); err == nil { + canonical = bw.digester.Digest() if canonical.Algorithm() == desc.Digest.Algorithm() { // Common case: client and server prefer the same canonical digest @@ -189,33 +185,49 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // uploaded content using that digest algorithm. fullHash = true } - } else { + } else if err == errResumableDigestNotAvailable { // Not using resumable digests, so we need to hash the entire layer. 
fullHash = true
+	} else {
+		return distribution.Descriptor{}, err
 	}
 
 	if fullHash {
-		digester := digest.NewCanonicalDigester()
-
-		digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
-		if err != nil {
-			return distribution.Descriptor{}, err
+		// a fantastic optimization: if the written data and the size are
+		// the same, we don't need to read the data from the backend. This is
+		// because we've written the entire file in the lifecycle of the
+		// current instance.
+		if bw.written == bw.size && digest.CanonicalAlgorithm == desc.Digest.Algorithm() {
+			canonical = bw.digester.Digest()
+			verified = desc.Digest == canonical
 		}
 
-		// Read the file from the backend driver and validate it.
-		fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Length)
-		if err != nil {
-			return distribution.Descriptor{}, err
+		// If the check based on size fails, we fall back to the slowest of
+		// paths. We may be able to make the size-based check a stronger
+		// guarantee, so this may be defensive.
+		if !verified {
+			digester := digest.NewCanonicalDigester()
+
+			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			// Read the file from the backend driver and validate it.
+			fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Length)
+			if err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			tr := io.TeeReader(fr, digester.Hash())
+
+			if _, err := io.Copy(digestVerifier, tr); err != nil {
+				return distribution.Descriptor{}, err
+			}
+
+			canonical = digester.Digest()
+			verified = digestVerifier.Verified()
 		}
-
-		tr := io.TeeReader(fr, digester)
-
-		if _, err := io.Copy(digestVerifier, tr); err != nil {
-			return distribution.Descriptor{}, err
-		}
-
-		canonical = digester.Digest()
-		verified = digestVerifier.Verified()
 	}
 
 	if !verified {
@@ -298,172 +310,3 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 
 	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
 }
-
-type hashStateEntry struct {
-	offset int64
-	path   string
-}
-
-// getStoredHashStates returns a slice of hashStateEntries for this upload.
-func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
-	uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
-		name: bw.blobStore.repository.Name(),
-		id:   bw.id,
-		alg:  bw.resumableDigester.Digest().Algorithm(),
-		list: true,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
-	if err != nil {
-		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
-			return nil, err
-		}
-		// Treat PathNotFoundError as no entries.
-		paths = nil
-	}
-
-	hashStateEntries := make([]hashStateEntry, 0, len(paths))
-
-	for _, p := range paths {
-		pathSuffix := path.Base(p)
-		// The suffix should be the offset.
-		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
-		if err != nil {
-			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
-		}
-
-		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
-	}
-
-	return hashStateEntries, nil
-}
-
-// resumeHashAt attempts to restore the state of the internal hash function
-// by loading the most recent saved hash state less than or equal to the given
-// offset. Any unhashed bytes remaining less than the given offset are hashed
-// from the content uploaded so far.
-func (bw *blobWriter) resumeHashAt(ctx context.Context, offset int64) error { - if offset < 0 { - return fmt.Errorf("cannot resume hash at negative offset: %d", offset) - } - - if offset == int64(bw.resumableDigester.Len()) { - // State of digester is already at the requested offset. - return nil - } - - // List hash states from storage backend. - var hashStateMatch hashStateEntry - hashStates, err := bw.getStoredHashStates(ctx) - if err != nil { - return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) - } - - // Find the highest stored hashState with offset less than or equal to - // the requested offset. - for _, hashState := range hashStates { - if hashState.offset == offset { - hashStateMatch = hashState - break // Found an exact offset match. - } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { - // This offset is closer to the requested offset. - hashStateMatch = hashState - } else if hashState.offset > offset { - // Remove any stored hash state with offsets higher than this one - // as writes to this resumed hasher will make those invalid. This - // is probably okay to skip for now since we don't expect anyone to - // use the API in this way. For that reason, we don't treat an - // an error here as a fatal error, but only log it. - if err := bw.driver.Delete(ctx, hashState.path); err != nil { - logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) - } - } - } - - if hashStateMatch.offset == 0 { - // No need to load any state, just reset the hasher. - bw.resumableDigester.Reset() - } else { - storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) - if err != nil { - return err - } - - if err = bw.resumableDigester.Restore(storedState); err != nil { - return err - } - } - - // Mind the gap. - if gapLen := offset - int64(bw.resumableDigester.Len()); gapLen > 0 { - // Need to read content from the upload to catch up to the desired offset. - fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) - if err != nil { - return err - } - - if _, err = fr.Seek(int64(bw.resumableDigester.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", bw.resumableDigester.Len(), err) - } - - if _, err := io.CopyN(bw.resumableDigester, fr, gapLen); err != nil { - return err - } - } - - return nil -} - -func (bw *blobWriter) storeHashState(ctx context.Context) error { - uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), - id: bw.id, - alg: bw.resumableDigester.Digest().Algorithm(), - offset: int64(bw.resumableDigester.Len()), - }) - if err != nil { - return err - } - - hashState, err := bw.resumableDigester.State() - if err != nil { - return err - } - - return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) -} - -// removeResources should clean up all resources associated with the upload -// instance. An error will be returned if the clean up cannot proceed. If the -// resources are already not present, no error will be returned. -func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{ - name: bw.blobStore.repository.Name(), - id: bw.id, - }) - - if err != nil { - return err - } - - // Resolve and delete the containing directory, which should include any - // upload related files. 
- dirPath := path.Dir(dataPath) - if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - break // already gone! - default: - // This should be uncommon enough such that returning an error - // should be okay. At this point, the upload should be mostly - // complete, but perhaps the backend became unaccessible. - context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) - return err - } - } - - return nil -} diff --git a/docs/storage/blobwriter_nonresumable.go b/docs/storage/blobwriter_nonresumable.go index ac2d7877..39166876 100644 --- a/docs/storage/blobwriter_nonresumable.go +++ b/docs/storage/blobwriter_nonresumable.go @@ -2,5 +2,16 @@ package storage -func (bw *blobWriter) setupResumableDigester() { +import ( + "github.com/docker/distribution/context" +) + +// resumeHashAt is a noop when resumable digest support is disabled. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + return errResumableDigestNotAvailable +} + +// storeHashState is a noop when resumable digest support is disabled. +func (bw *blobWriter) storeHashState(ctx context.Context) error { + return errResumableDigestNotAvailable } diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go index f20a6c36..af847888 100644 --- a/docs/storage/blobwriter_resumable.go +++ b/docs/storage/blobwriter_resumable.go @@ -2,8 +2,198 @@ package storage -import "github.com/docker/distribution/digest" +import ( + "fmt" + "io" + "os" + "path" + "strconv" -func (bw *blobWriter) setupResumableDigester() { - bw.resumableDigester = digest.NewCanonicalResumableDigester() + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/stevvooe/resumable" + + // register resumable hashes with import + _ "github.com/stevvooe/resumable/sha256" + _ "github.com/stevvooe/resumable/sha512" +) + +// resumeDigestAt attempts to restore the state of the internal hash function +// by loading the most recent saved hash state less than or equal to the given +// offset. Any unhashed bytes remaining less than the given offset are hashed +// from the content uploaded so far. +func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + if offset < 0 { + return fmt.Errorf("cannot resume hash at negative offset: %d", offset) + } + + h, ok := bw.digester.Hash().(resumable.Hash) + if !ok { + return errResumableDigestNotAvailable + } + + if offset == int64(h.Len()) { + // State of digester is already at the requested offset. + return nil + } + + // List hash states from storage backend. + var hashStateMatch hashStateEntry + hashStates, err := bw.getStoredHashStates(ctx) + if err != nil { + return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) + } + + // Find the highest stored hashState with offset less than or equal to + // the requested offset. + for _, hashState := range hashStates { + if hashState.offset == offset { + hashStateMatch = hashState + break // Found an exact offset match. + } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { + // This offset is closer to the requested offset. + hashStateMatch = hashState + } else if hashState.offset > offset { + // Remove any stored hash state with offsets higher than this one + // as writes to this resumed hasher will make those invalid. 
+			// This is probably okay to skip for now since we don't expect
+			// anyone to use the API in this way. For that reason, we don't
+			// treat an error here as a fatal error, but only log it.
+			if err := bw.driver.Delete(ctx, hashState.path); err != nil {
+				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
+			}
+		}
+	}
+
+	if hashStateMatch.offset == 0 {
+		// No need to load any state, just reset the hasher.
+		h.Reset()
+	} else {
+		storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
+		if err != nil {
+			return err
+		}
+
+		if err = h.Restore(storedState); err != nil {
+			return err
+		}
+	}
+
+	// Mind the gap.
+	if gapLen := offset - int64(h.Len()); gapLen > 0 {
+		// Need to read content from the upload to catch up to the desired offset.
+		fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size)
+		if err != nil {
+			return err
+		}
+
+		if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil {
+			return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err)
+		}
+
+		if _, err := io.CopyN(h, fr, gapLen); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// removeResources should clean up all resources associated with the upload
+// instance. An error will be returned if the clean up cannot proceed. If the
+// resources are already not present, no error will be returned.
+func (bw *blobWriter) removeResources(ctx context.Context) error {
+	dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{
+		name: bw.blobStore.repository.Name(),
+		id:   bw.id,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Resolve and delete the containing directory, which should include any
+	// upload related files.
+	dirPath := path.Dir(dataPath)
+	if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			break // already gone!
+		default:
+			// This should be uncommon enough such that returning an error
+			// should be okay. At this point, the upload should be mostly
+			// complete, but perhaps the backend became inaccessible.
+			context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+type hashStateEntry struct {
+	offset int64
+	path   string
+}
+
+// getStoredHashStates returns a slice of hashStateEntries for this upload.
+func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
+	uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
+		name: bw.blobStore.repository.Name(),
+		id:   bw.id,
+		alg:  bw.digester.Digest().Algorithm(),
+		list: true,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
+	if err != nil {
+		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
+			return nil, err
+		}
+		// Treat PathNotFoundError as no entries.
+		paths = nil
+	}
+
+	hashStateEntries := make([]hashStateEntry, 0, len(paths))
+
+	for _, p := range paths {
+		pathSuffix := path.Base(p)
+		// The suffix should be the offset.
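+		// State files are named for the byte offset at which they were
+		// captured (e.g. .../hashstates/sha256/1024), so the base name
+		// parses directly as an integer.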
+		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
+		if err != nil {
+			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
+		}
+
+		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
+	}
+
+	return hashStateEntries, nil
+}
+
+func (bw *blobWriter) storeHashState(ctx context.Context) error {
+	h, ok := bw.digester.Hash().(resumable.Hash)
+	if !ok {
+		return errResumableDigestNotAvailable
+	}
+
+	uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{
+		name:   bw.blobStore.repository.Name(),
+		id:     bw.id,
+		alg:    bw.digester.Digest().Algorithm(),
+		offset: int64(h.Len()),
+	})
+	if err != nil {
+		return err
+	}
+
+	hashState, err := h.State()
+	if err != nil {
+		return err
+	}
+
+	return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
 }
diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index 91dd0616..ceb53fa8 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -164,11 +164,10 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string
 		blobStore:          lbs,
 		id:                 uuid,
 		startedAt:          startedAt,
+		digester:           digest.NewCanonicalDigester(),
 		bufferedFileWriter: *fw,
 	}
 
-	bw.setupResumableDigester()
-
 	return bw, nil
 }

From d4c50637f9fb1e8f18dfe8947973dbee2bfead83 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 22 May 2015 16:45:45 -0700
Subject: [PATCH 137/501] Better error message when failing to get AWS auth

Signed-off-by: Stephen J Day
---
 docs/storage/driver/s3/s3.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go
index 4ed8e7cd..552c221d 100644
--- a/docs/storage/driver/s3/s3.go
+++ b/docs/storage/driver/s3/s3.go
@@ -201,7 +201,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
 func New(params DriverParameters) (*Driver, error) {
 	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err)
 	}
 
 	if !params.Secure {

From bdaed4c78916e910b8f345585265355252f6912d Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 21 May 2015 18:44:08 -0700
Subject: [PATCH 138/501] Refactor specification of supported digests

To make the definition of supported digests clearer, we have
refactored the digest package to have a special Algorithm type. This
type represents the digest's prefix, and the supported hash
implementations are associated with it through function calls.
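By way of illustration (a sketch inferred from the call sites updated
below, which rely on digest.Canonical, Algorithm.New, Digester.Hash and
Digester.Digest), hashing content with the new type looks roughly like:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"

    	"github.com/docker/distribution/digest"
    )

    func main() {
    	// digest.Canonical is an Algorithm value naming the canonical
    	// (sha256) algorithm; New returns a Digester wrapping its hash.
    	digester := digest.Canonical.New()

    	// Stream any reader through the digester's underlying hash.
    	if _, err := io.Copy(digester.Hash(), bytes.NewReader([]byte("hello world"))); err != nil {
    		panic(err)
    	}

    	// Digest renders the accumulated hash as "<algorithm>:<hex>".
    	fmt.Println(digester.Digest())
    }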
Signed-off-by: Stephen J Day --- docs/client/repository.go | 2 +- docs/handlers/api_test.go | 6 +++--- docs/storage/blobwriter.go | 7 ++++--- docs/storage/linkedblobstore.go | 2 +- docs/storage/paths.go | 6 +++--- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index cd93cd1a..a806aea4 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -321,7 +321,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut if err != nil { return distribution.Descriptor{}, err } - dgstr := digest.NewCanonicalDigester() + dgstr := digest.Canonical.New() n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { return distribution.Descriptor{}, err diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 1a41cfb8..5132f72e 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -213,7 +213,7 @@ func TestBlobAPI(t *testing.T) { // Now, push just a chunk layerFile.Seek(0, 0) - canonicalDigester := digest.NewCanonicalDigester() + canonicalDigester := digest.Canonical.New() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } @@ -637,7 +637,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges // pushLayer pushes the layer content returning the url on success. func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) if err != nil { @@ -702,7 +702,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp uploadURL := u.String() - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) if err != nil { diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 40841a5e..6a37e81d 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -1,6 +1,7 @@ package storage import ( + "errors" "fmt" "io" "time" @@ -13,7 +14,7 @@ import ( ) var ( - errResumableDigestNotAvailable = fmt.Errorf("resumable digest not available") + errResumableDigestNotAvailable = errors.New("resumable digest not available") ) // layerWriter is used to control the various aspects of resumable @@ -197,7 +198,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // the same, we don't need to read the data from the backend. This is // because we've written the entire file in the lifecycle of the // current instance. - if bw.written == bw.size && digest.CanonicalAlgorithm == desc.Digest.Algorithm() { + if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { canonical = bw.digester.Digest() verified = desc.Digest == canonical } @@ -206,7 +207,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // paths. We may be able to make the size-based check a stronger // guarantee, so this may be defensive. 
if !verified { - digester := digest.NewCanonicalDigester() + digester := digest.Canonical.New() digestVerifier, err := digest.NewDigestVerifier(desc.Digest) if err != nil { diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index ceb53fa8..cb06e354 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -164,7 +164,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string blobStore: lbs, id: uuid, startedAt: startedAt, - digester: digest.NewCanonicalDigester(), + digester: digest.Canonical.New(), bufferedFileWriter: *fw, } diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 9e150d3b..35debddf 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -262,7 +262,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { if v.list { offset = "" // Limit to the prefix for listing offsets. } - return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", v.alg, offset)...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil case repositoriesRootPathSpec: return path.Join(repoPrefix...), nil default: @@ -447,7 +447,7 @@ func (uploadStartedAtPathSpec) pathSpec() {} type uploadHashStatePathSpec struct { name string id string - alg string + alg digest.Algorithm offset int64 list bool } @@ -479,7 +479,7 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) return nil, err } - algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm()) + algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) hex := dgst.Hex() prefix := []string{algorithm} From f8c0086e93112279086a807a61adbf32d0463019 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 22 May 2015 15:55:54 -0700 Subject: [PATCH 139/501] Replace uuid dependency with internal library Signed-off-by: Stephen J Day --- docs/client/repository_test.go | 8 ++++---- docs/storage/linkedblobstore.go | 4 ++-- docs/storage/purgeuploads.go | 6 +++--- docs/storage/purgeuploads_test.go | 10 +++++----- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 9530bd37..6551c492 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "code.google.com/p/go-uuid/uuid" + "github.com/docker/distribution/uuid" "github.com/docker/distribution" "github.com/docker/distribution/context" @@ -141,7 +141,7 @@ func TestBlobUploadChunked(t *testing.T) { b1[513:1024], } repo := "test.example.com/uploadrepo" - uuids := []string{uuid.New()} + uuids := []string{uuid.Generate().String()} m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", @@ -159,7 +159,7 @@ func TestBlobUploadChunked(t *testing.T) { }) offset := 0 for i, chunk := range chunks { - uuids = append(uuids, uuid.New()) + uuids = append(uuids, uuid.Generate().String()) newOffset := offset + len(chunk) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -256,7 +256,7 @@ func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap repo := "test.example.com/uploadrepo" - uploadID := uuid.New() + uploadID := uuid.Generate().String() m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 91dd0616..e3bd4b6e 100644 --- 
a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -4,11 +4,11 @@ import ( "net/http" "time" - "code.google.com/p/go-uuid/uuid" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" ) // linkedBlobStore provides a full BlobService that namespaces the blobs to a @@ -85,7 +85,7 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") - uuid := uuid.New() + uuid := uuid.Generate().String() startedAt := time.Now().UTC() path, err := lbs.blobStore.pm.path(uploadDataPathSpec{ diff --git a/docs/storage/purgeuploads.go b/docs/storage/purgeuploads.go index cf723070..c66f8881 100644 --- a/docs/storage/purgeuploads.go +++ b/docs/storage/purgeuploads.go @@ -5,10 +5,10 @@ import ( "strings" "time" - "code.google.com/p/go-uuid/uuid" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/uuid" ) // uploadData stored the location of temporary files created during a layer upload @@ -116,8 +116,8 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv func uUIDFromPath(path string) (string, bool) { components := strings.Split(path, "/") for i := len(components) - 1; i >= 0; i-- { - if uuid := uuid.Parse(components[i]); uuid != nil { - return uuid.String(), i == len(components)-1 + if u, err := uuid.Parse(components[i]); err == nil { + return u.String(), i == len(components)-1 } } return "", false diff --git a/docs/storage/purgeuploads_test.go b/docs/storage/purgeuploads_test.go index d4408479..18c98af8 100644 --- a/docs/storage/purgeuploads_test.go +++ b/docs/storage/purgeuploads_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - "code.google.com/p/go-uuid/uuid" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/uuid" ) var pm = defaultPathMapper @@ -18,7 +18,7 @@ func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time. d := inmemory.New() ctx := context.Background() for i := 0; i < numUploads; i++ { - addUploads(ctx, t, d, uuid.New(), repoName, startedAt) + addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) } return d, ctx } @@ -73,7 +73,7 @@ func TestPurgeAll(t *testing.T) { fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) // Ensure > 1 repos are purged - addUploads(ctx, t, fs, uuid.New(), "test-repo2", oneHourAgo) + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) uploadCount++ deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) @@ -95,7 +95,7 @@ func TestPurgeSome(t *testing.T) { newUploadCount := 4 for i := 0; i < newUploadCount; i++ { - addUploads(ctx, t, fs, uuid.New(), "test-repo", time.Now().Add(1*time.Hour)) + addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) } deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) @@ -115,7 +115,7 @@ func TestPurgeOnlyUploads(t *testing.T) { // Create a directory tree outside _uploads and ensure // these files aren't deleted. 
-	dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.New()})
+	dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()})
 	if err != nil {
 		t.Fatalf(err.Error())
 	}

From f565d6abb75a3860c7fa602e1bd756cdb1434eaf Mon Sep 17 00:00:00 2001
From: Doug Davis
Date: Thu, 14 May 2015 18:21:39 -0700
Subject: [PATCH 140/501] Move ErrorCode logic to new errcode package

Make HTTP status codes match the ErrorCode by looking it up in the
Descriptors.

Signed-off-by: Doug Davis
---
 docs/api/errcode/errors.go              | 206 ++++++++++++++++++
 docs/api/{v2 => errcode}/errors_test.go |   8 +-
 docs/api/v2/descriptors.go              | 223 +++----------------
 docs/api/v2/errors.go                   | 270 ++++++++++++------------
 docs/client/blob_writer_test.go         |   5 +-
 docs/client/errors.go                   |   5 +-
 docs/client/repository_test.go          |   3 +-
 docs/handlers/api_test.go               |  11 +-
 docs/handlers/app.go                    |  30 +--
 docs/handlers/app_test.go               |  11 +-
 docs/handlers/blob.go                   |   7 +-
 docs/handlers/blobupload.go             |  45 +---
 docs/handlers/context.go                |   3 +-
 docs/handlers/helpers.go                |  12 ++
 docs/handlers/images.go                 |   9 -
 docs/handlers/tags.go                   |   1 -
 16 files changed, 444 insertions(+), 405 deletions(-)
 create mode 100644 docs/api/errcode/errors.go
 rename docs/api/{v2 => errcode}/errors_test.go (98%)

diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go
new file mode 100644
index 00000000..ce3c0624
--- /dev/null
+++ b/docs/api/errcode/errors.go
@@ -0,0 +1,206 @@
+package errcode
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+var (
+	errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
+	idToDescriptors        = map[string]ErrorDescriptor{}
+)
+
+const (
+	// ErrorCodeUnknown is a catch-all for errors not defined below.
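+	// Codes are only ever serialized through their string Value (see
+	// MarshalText below); this integer base is arbitrary and may change,
+	// and LoadErrors will panic if two registered codes collide.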
+ ErrorCodeUnknown ErrorCode = 10000 + iota +) + +var errorDescriptors = []ErrorDescriptor{ + { + Code: ErrorCodeUnknown, + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }, +} + +// LoadErrors will register a new set of Errors into the system +func LoadErrors(errs *[]ErrorDescriptor) { + for _, descriptor := range *errs { + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %s is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %d is already registered", descriptor.Code)) + } + + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + } +} + +// ParseErrorCode attempts to parse the error code string, returning +// ErrorCodeUnknown if the error is not known. +func ParseErrorCode(s string) ErrorCode { + desc, ok := idToDescriptors[s] + + if !ok { + return ErrorCodeUnknown + } + + return desc.Code +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail interface{} `json:"detail,omitempty"` +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", + strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), + e.Message) +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors struct { + Errors []Error `json:"errors,omitempty"` +} + +// Push pushes an error on to the error stack, with the optional detail +// argument. It is a programming error (ie panic) to push more than one +// detail at a time. +func (errs *Errors) Push(code ErrorCode, details ...interface{}) { + if len(details) > 1 { + panic("please specify zero or one detail items for this error") + } + + var detail interface{} + if len(details) > 0 { + detail = details[0] + } + + if err, ok := detail.(error); ok { + detail = err.Error() + } + + errs.PushErr(Error{ + Code: code, + Message: code.Message(), + Detail: detail, + }) +} + +// PushErr pushes an error interface onto the error stack. 
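+// Values that are not already an Error are wrapped in one carrying just
+// their Message, as the type switch below shows.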
+func (errs *Errors) PushErr(err error) { + switch err.(type) { + case Error: + errs.Errors = append(errs.Errors, err.(Error)) + default: + errs.Errors = append(errs.Errors, Error{Message: err.Error()}) + } +} + +func (errs *Errors) Error() string { + switch errs.Len() { + case 0: + return "" + case 1: + return errs.Errors[0].Error() + default: + msg := "errors:\n" + for _, err := range errs.Errors { + msg += err.Error() + "\n" + } + return msg + } +} + +// Clear clears the errors. +func (errs *Errors) Clear() { + errs.Errors = nil +} + +// Len returns the current number of errors. +func (errs *Errors) Len() int { + return len(errs.Errors) +} + +// init loads the default errors that are part of the errcode package +func init() { + LoadErrors(&errorDescriptors) +} diff --git a/docs/api/v2/errors_test.go b/docs/api/errcode/errors_test.go similarity index 98% rename from docs/api/v2/errors_test.go rename to docs/api/errcode/errors_test.go index 9cc831c4..eedb22ed 100644 --- a/docs/api/v2/errors_test.go +++ b/docs/api/errcode/errors_test.go @@ -1,11 +1,11 @@ -package v2 +package errcode import ( "encoding/json" - "reflect" + // "reflect" "testing" - "github.com/docker/distribution/digest" + // "github.com/docker/distribution/digest" ) // TestErrorCodes ensures that error code format, mappings and @@ -56,6 +56,7 @@ func TestErrorCodes(t *testing.T) { // TestErrorsManagement does a quick check of the Errors type to ensure that // members are properly pushed and marshaled. +/* func TestErrorsManagement(t *testing.T) { var errs Errors @@ -163,3 +164,4 @@ func TestMarshalUnmarshal(t *testing.T) { t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors) } } +*/ diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index d7c4a880..db5a9270 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -5,6 +5,7 @@ import ( "regexp" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" ) var ( @@ -98,7 +99,7 @@ var ( Format: "", }, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, Body: BodyDescriptor{ @@ -119,7 +120,7 @@ var ( Format: "", }, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, Body: BodyDescriptor{ @@ -174,7 +175,7 @@ var APIDescriptor = struct { // ErrorDescriptors provides a list of the error codes and their // associated documentation and metadata. - ErrorDescriptors []ErrorDescriptor + ErrorDescriptors []errcode.ErrorDescriptor }{ RouteDescriptors: routeDescriptors, ErrorDescriptors: errorDescriptors, @@ -275,7 +276,7 @@ type ResponseDescriptor struct { // ErrorCodes enumerates the error codes that may be returned along with // the response. - ErrorCodes []ErrorCode + ErrorCodes []errcode.ErrorCode // Body describes the body of the response, if any. Body BodyDescriptor @@ -317,30 +318,6 @@ type ParameterDescriptor struct { Examples []string } -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. 
- Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCodes provides a list of status under which this error - // condition may arise. If it is empty, the error condition may be seen - // for any status code. - HTTPStatusCodes []int -} - var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBase, @@ -374,7 +351,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, }, @@ -438,7 +415,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, }, }, @@ -449,7 +426,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, }, @@ -495,7 +472,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, @@ -511,14 +488,14 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, }, { Description: "The named manifest is not known to the registry.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, @@ -573,7 +550,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, ErrorCodeManifestInvalid, @@ -588,7 +565,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, }, @@ -596,7 +573,7 @@ var routeDescriptors = []RouteDescriptor{ Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ @@ -625,7 +602,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "", }, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, Body: BodyDescriptor{ @@ -660,7 +637,7 @@ var routeDescriptors = []RouteDescriptor{ Name: "Invalid Name or Tag", Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, @@ -680,7 +657,7 @@ var routeDescriptors = []RouteDescriptor{ Format: "", }, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeUnauthorized, }, Body: BodyDescriptor{ @@ -692,7 +669,7 @@ var routeDescriptors = []RouteDescriptor{ Name: "Unknown Manifest", Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. 
Clients can assume the manifest was already deleted if this response is returned.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, @@ -765,7 +742,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, @@ -782,7 +759,7 @@ var routeDescriptors = []RouteDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, @@ -834,7 +811,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, @@ -846,7 +823,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponse, { StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, @@ -926,7 +903,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, @@ -970,7 +947,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, @@ -1024,7 +1001,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1038,7 +1015,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1096,7 +1073,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1110,7 +1087,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1175,7 +1152,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1189,7 +1166,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. 
The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1266,7 +1243,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, @@ -1280,7 +1257,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1321,7 +1298,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "An error was encountered processing the delete. The client may ignore this error.", StatusCode: http.StatusBadRequest, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, @@ -1334,7 +1311,7 @@ var routeDescriptors = []RouteDescriptor{ { Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, - ErrorCodes: []ErrorCode{ + ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ @@ -1350,143 +1327,11 @@ var routeDescriptors = []RouteDescriptor{ }, } -// ErrorDescriptors provides a list of HTTP API Error codes that may be -// encountered when interacting with the registry API. -var errorDescriptors = []ErrorDescriptor{ - { - Code: ErrorCodeUnknown, - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - }, - { - Code: ErrorCodeUnsupported, - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - }, - { - Code: ErrorCodeUnauthorized, - Value: "UNAUTHORIZED", - Message: "access to the requested resource is not authorized", - Description: `The access controller denied access for the operation on - a resource. Often this will be accompanied by a 401 Unauthorized - response status.`, - }, - { - Code: ErrorCodeDigestInvalid, - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeSizeInvalid, - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. 
If they do not match, this error - will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeNameInvalid, - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeTagInvalid, - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeNameUnknown, - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestUnknown, - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestInvalid, - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeManifestUnverified, - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeBlobUnknown, - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. 
This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - - { - Code: ErrorCodeBlobUploadUnknown, - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeBlobUploadInvalid, - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, -} - -var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor -var idToDescriptors map[string]ErrorDescriptor var routeDescriptorsMap map[string]RouteDescriptor func init() { - errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors)) - idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors)) routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - for _, descriptor := range errorDescriptors { - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - } for _, descriptor := range routeDescriptors { routeDescriptorsMap[descriptor.Name] = descriptor } diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index cbae020e..fc61549b 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -1,20 +1,14 @@ package v2 import ( - "fmt" - "strings" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" ) -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - const ( - // ErrorCodeUnknown is a catch-all for errors not defined below. - ErrorCodeUnknown ErrorCode = iota - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported + ErrorCodeUnsupported = iota // ErrorCodeUnauthorized is returned if a request is not authorized. ErrorCodeUnauthorized @@ -50,6 +44,10 @@ const ( // signature verfication. ErrorCodeManifestUnverified + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown + // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. @@ -62,133 +60,133 @@ const ( ErrorCodeBlobUploadInvalid ) -// ParseErrorCode attempts to parse the error code string, returning -// ErrorCodeUnknown if the error is not known. -func ParseErrorCode(s string) ErrorCode { - desc, ok := idToDescriptors[s] +// ErrorDescriptors provides a list of HTTP API Error codes that may be +// encountered when interacting with the registry API. +var errorDescriptors = []errcode.ErrorDescriptor{ + { + Code: ErrorCodeUnsupported, + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + }, + { + Code: ErrorCodeUnauthorized, + Value: "UNAUTHORIZED", + Message: "access to the requested resource is not authorized", + Description: `The access controller denied access for the operation on + a resource. 
Often this will be accompanied by a 401 Unauthorized + response status.`, + HTTPStatusCode: http.StatusForbidden, + }, + { + Code: ErrorCodeDigestInvalid, + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeSizeInvalid, + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeNameInvalid, + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeTagInvalid, + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeNameUnknown, + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCode: http.StatusNotFound, + }, + { + Code: ErrorCodeManifestUnknown, + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCode: http.StatusNotFound, + }, + { + Code: ErrorCodeManifestInvalid, + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeManifestUnverified, + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeManifestBlobUnknown, + Value: "MANIFEST_BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a manifest blob is + unknown to the registry.`, + HTTPStatusCode: http.StatusBadRequest, + }, + { + Code: ErrorCodeBlobUnknown, + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. 
This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCode: http.StatusNotFound, + }, - if !ok { - return ErrorCodeUnknown - } - - return desc.Code + { + Code: ErrorCodeBlobUploadUnknown, + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCode: http.StatusNotFound, + }, + { + Code: ErrorCodeBlobUploadInvalid, + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCode: http.StatusNotFound, + }, } -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors struct { - Errors []Error `json:"errors,omitempty"` -} - -// Push pushes an error on to the error stack, with the optional detail -// argument. It is a programming error (ie panic) to push more than one -// detail at a time. -func (errs *Errors) Push(code ErrorCode, details ...interface{}) { - if len(details) > 1 { - panic("please specify zero or one detail items for this error") - } - - var detail interface{} - if len(details) > 0 { - detail = details[0] - } - - if err, ok := detail.(error); ok { - detail = err.Error() - } - - errs.PushErr(Error{ - Code: code, - Message: code.Message(), - Detail: detail, - }) -} - -// PushErr pushes an error interface onto the error stack. -func (errs *Errors) PushErr(err error) { - switch err.(type) { - case Error: - errs.Errors = append(errs.Errors, err.(Error)) - default: - errs.Errors = append(errs.Errors, Error{Message: err.Error()}) - } -} - -func (errs *Errors) Error() string { - switch errs.Len() { - case 0: - return "" - case 1: - return errs.Errors[0].Error() - default: - msg := "errors:\n" - for _, err := range errs.Errors { - msg += err.Error() + "\n" - } - return msg - } -} - -// Clear clears the errors. -func (errs *Errors) Clear() { - errs.Errors = errs.Errors[:0] -} - -// Len returns the current number of errors. 
-func (errs *Errors) Len() int { - return len(errs.Errors) +// init registers our errors with the errcode system +func init() { + errcode.LoadErrors(&errorDescriptors) } diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 674d6e01..3fdeb6ee 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/testutil" ) @@ -161,14 +162,14 @@ func TestUploadReadFrom(t *testing.T) { if err == nil { t.Fatalf("Expected error when not found") } - if uploadErr, ok := err.(*v2.Errors); !ok { + if uploadErr, ok := err.(*errcode.Errors); !ok { t.Fatalf("Wrong error type %T: %s", err, err) } else if len(uploadErr.Errors) != 1 { t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr.Errors)) } else { v2Err := uploadErr.Errors[0] if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { - t.Fatalf("Unexpected error code: %s, expected %s", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid.String()) + t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) } if expected := "invalid upload identifier"; v2Err.Message != expected { t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Message, expected) diff --git a/docs/client/errors.go b/docs/client/errors.go index 2638055d..ef25dddf 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "net/http" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" ) @@ -32,7 +33,7 @@ func (e *UnexpectedHTTPResponseError) Error() string { } func parseHTTPErrorResponse(r io.Reader) error { - var errors v2.Errors + var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err @@ -51,7 +52,7 @@ func handleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return &v2.Error{ + return &errcode.Error{ Code: v2.ErrorCodeUnauthorized, Message: "401 Unauthorized", Detail: uErr.Response, diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 6551c492..24946ed5 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/testutil" ) @@ -668,7 +669,7 @@ func TestManifestUnauthorized(t *testing.T) { if err == nil { t.Fatal("Expected error fetching manifest") } - v2Err, ok := err.(*v2.Error) + v2Err, ok := err.(*errcode.Error) if !ok { t.Fatalf("Unexpected error type: %#v", err) } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 5132f72e..c5a99453 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -19,6 +19,7 @@ import ( "github.com/docker/distribution/configuration" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -373,7 
+374,7 @@ func TestManifestAPI(t *testing.T) { _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid) - expectedCounts := map[v2.ErrorCode]int{ + expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestUnverified: 1, v2.ErrorCodeBlobUnknown: 2, v2.ErrorCodeDigestInvalid: 2, @@ -748,13 +749,13 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus // checkBodyHasErrorCodes ensures the body is an error body and has the // expected error codes, returning the error structure, the json slice and a // count of the errors by code. -func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...v2.ErrorCode) (v2.Errors, []byte, map[v2.ErrorCode]int) { +func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { p, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("unexpected error reading body %s: %v", msg, err) } - var errs v2.Errors + var errs errcode.Errors if err := json.Unmarshal(p, &errs); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } @@ -770,8 +771,8 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error // resp.Header.Get("Content-Type")) // } - expected := map[v2.ErrorCode]struct{}{} - counts := map[v2.ErrorCode]int{} + expected := map[errcode.ErrorCode]struct{}{} + counts := map[errcode.ErrorCode]int{} // Initialize map with zeros for expected for _, code := range errorCodes { diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 1d58e945..2747ac8b 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -13,6 +13,7 @@ import ( "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/notifications" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" registrymiddleware "github.com/docker/distribution/registry/middleware/registry" @@ -350,7 +351,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Errors.Push(v2.ErrorCodeNameInvalid, err) } - w.WriteHeader(http.StatusBadRequest) serveJSON(w, context.Errors) return } @@ -363,8 +363,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) + context.Errors.Push(errcode.ErrorCodeUnknown, err) + serveJSON(w, context.Errors) return } @@ -375,19 +375,14 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // own errors if they need different behavior (such as range errors // for layer upload). if context.Errors.Len() > 0 { - if context.Value("http.response.status") == 0 { - // TODO(stevvooe): Getting this value from the context is a - // bit of a hack. We can further address with some of our - // future refactoring. 
- w.WriteHeader(http.StatusBadRequest) - } app.logError(context, context.Errors) + serveJSON(w, context.Errors) } }) } -func (app *App) logError(context context.Context, errors v2.Errors) { +func (app *App) logError(context context.Context, errors errcode.Errors) { for _, e := range errors.Errors { c := ctxu.WithValue(context, "err.code", e.Code) c = ctxu.WithValue(c, "err.message", e.Message) @@ -444,11 +439,10 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // base route is accessed. This section prevents us from making // that mistake elsewhere in the code, allowing any operation to // proceed. - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusForbidden) - var errs v2.Errors + var errs errcode.Errors errs.Push(v2.ErrorCodeUnauthorized) + serveJSON(w, errs) return fmt.Errorf("forbidden: no repository name") } @@ -458,10 +452,18 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont if err != nil { switch err := err.(type) { case auth.Challenge: + // Since err.ServeHTTP will set the HTTP status code for us + // we need to set the content-type here. The serveJSON + // func will try to do it but it'll be too late at that point. + // I would have preferred to just have the auth.Challenge + // ServeHTTP func just add the WWW-Authenticate header and let + // serveJSON set the HTTP status code and content-type but I wasn't + // sure if that's an ok design change. STEVVOOE ? w.Header().Set("Content-Type", "application/json; charset=utf-8") + err.ServeHTTP(w, r) - var errs v2.Errors + var errs errcode.Errors errs.Push(v2.ErrorCodeUnauthorized, accessRecords) serveJSON(w, errs) default: diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index fd1c486c..da76dc0d 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/docker/distribution/configuration" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" @@ -185,16 +186,18 @@ func TestNewApp(t *testing.T) { t.Fatalf("unexpected status code during request: %v", err) } - if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { - t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") - } + /* + if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { + t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") + } + */ expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) } - var errs v2.Errors + var errs errcode.Errors dec := json.NewDecoder(req.Body) if err := dec.Decode(&errs); err != nil { t.Fatalf("error decoding error response: %v", err) diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index 3237b195..56699fe9 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -6,6 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" ) @@ -17,7 +18,6 @@ func blobDispatcher(ctx *Context, r 
*http.Request) http.Handler { if err == errDigestNotAvailable { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) }) } @@ -53,17 +53,16 @@ func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { desc, err := blobs.Stat(bh, bh.Digest) if err != nil { if err == distribution.ErrBlobUnknown { - w.WriteHeader(http.StatusNotFound) bh.Errors.Push(v2.ErrorCodeBlobUnknown, bh.Digest) } else { - bh.Errors.Push(v2.ErrorCodeUnknown, err) + bh.Errors.Push(errcode.ErrorCodeUnknown, err) } return } if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) - bh.Errors.Push(v2.ErrorCodeUnknown, err) + bh.Errors.Push(errcode.ErrorCodeUnknown, err) return } } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 99a75698..7046edd3 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" ) @@ -36,7 +37,6 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) }) } @@ -45,7 +45,6 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if state.Name != ctx.Repository.Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) }) } @@ -53,7 +52,6 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if state.UUID != buh.UUID { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) }) } @@ -64,14 +62,12 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) if err == distribution.ErrBlobUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) }) } buh.Upload = upload @@ -85,7 +81,6 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { defer upload.Close() ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) upload.Cancel(buh) }) @@ -93,7 +88,6 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { defer upload.Close() ctxu.GetLogger(ctx).Infof("seek to wrong 
offest: %d != %d", nn, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) upload.Cancel(buh) }) @@ -125,8 +119,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req blobs := buh.Repository.Blobs(buh) upload, err := blobs.Create(buh) if err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } @@ -134,8 +127,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req defer buh.Upload.Close() if err := buh.blobUploadResponse(w, r, true); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } @@ -146,7 +138,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req // GetUploadStatus returns the status of a given upload, identified by id. func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - w.WriteHeader(http.StatusNotFound) buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) return } @@ -155,8 +146,7 @@ func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Req // resumable upload is supported. This will enable returning a non-zero // range for clients to begin uploading at an offset. if err := buh.blobUploadResponse(w, r, true); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } @@ -167,14 +157,13 @@ func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Req // PatchBlobData writes data to an upload. func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - w.WriteHeader(http.StatusNotFound) buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) return } ct := r.Header.Get("Content-Type") if ct != "" && ct != "application/octet-stream" { - w.WriteHeader(http.StatusBadRequest) + buh.Errors.Push(errcode.ErrorCodeUnknown, fmt.Errorf("Bad Content-Type")) // TODO(dmcgowan): encode error return } @@ -184,14 +173,12 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // Copy the data if _, err := io.Copy(buh.Upload, r.Body); err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } if err := buh.blobUploadResponse(w, r, false); err != nil { - w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } @@ -205,7 +192,6 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // url of the blob. func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - w.WriteHeader(http.StatusNotFound) buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) return } @@ -214,7 +200,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht if dgstStr == "" { // no digest? return error, but allow retry. 
- w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") return } @@ -222,7 +207,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht dgst, err := digest.ParseDigest(dgstStr) if err != nil { // no digest? return error, but allow retry. - w.WriteHeader(http.StatusNotFound) buh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed") return } @@ -230,8 +214,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // Read in the data, if any. if _, err := io.Copy(buh.Upload, r.Body); err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } @@ -246,17 +229,14 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht if err != nil { switch err := err.(type) { case distribution.ErrBlobInvalidDigest: - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeDigestInvalid, err) default: switch err { case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: - w.WriteHeader(http.StatusBadRequest) buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) default: ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) - w.WriteHeader(http.StatusInternalServerError) - buh.Errors.Push(v2.ErrorCodeUnknown, err) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) } } @@ -273,8 +253,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // Build our canonical blob url blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) if err != nil { - buh.Errors.Push(v2.ErrorCodeUnknown, err) - w.WriteHeader(http.StatusInternalServerError) + buh.Errors.Push(errcode.ErrorCodeUnknown, err) return } @@ -287,7 +266,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // CancelBlobUpload cancels an in-progress upload of a blob. func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - w.WriteHeader(http.StatusNotFound) buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) return } @@ -295,7 +273,6 @@ func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Re w.Header().Set("Docker-Upload-UUID", buh.UUID) if err := buh.Upload.Cancel(buh); err != nil { ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) - w.WriteHeader(http.StatusInternalServerError) buh.Errors.PushErr(err) } diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 0df55346..85a17123 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "golang.org/x/net/context" ) @@ -27,7 +28,7 @@ type Context struct { // Errors is a collection of errors encountered during the request to be // returned to the client API. If errors are added to the collection, the // handler *must not* start the response via http.ResponseWriter. 
- Errors v2.Errors + Errors errcode.Errors urlBuilder *v2.URLBuilder diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index f2879137..3611a72d 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -2,6 +2,7 @@ package handlers import ( "encoding/json" + "github.com/docker/distribution/registry/api/errcode" "io" "net/http" ) @@ -11,6 +12,17 @@ import ( // ResponseWriter.WriteHeader before this function. func serveJSON(w http.ResponseWriter, v interface{}) error { w.Header().Set("Content-Type", "application/json; charset=utf-8") + sc := http.StatusInternalServerError + + if errs, ok := v.(errcode.Errors); ok && errs.Len() > 0 { + sc = errs.Errors[0].Code.Descriptor().HTTPStatusCode + if sc == 0 { + sc = http.StatusInternalServerError + } + } + + w.WriteHeader(sc) + enc := json.NewEncoder(w) if err := enc.Encode(v); err != nil { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 45029da5..d717cf72 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -64,7 +64,6 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http if err != nil { imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) - w.WriteHeader(http.StatusNotFound) return } @@ -73,7 +72,6 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http dgst, err := digestManifest(imh, sm) if err != nil { imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - w.WriteHeader(http.StatusBadRequest) return } @@ -95,14 +93,12 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http var manifest manifest.SignedManifest if err := dec.Decode(&manifest); err != nil { imh.Errors.Push(v2.ErrorCodeManifestInvalid, err) - w.WriteHeader(http.StatusBadRequest) return } dgst, err := digestManifest(imh, &manifest) if err != nil { imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) - w.WriteHeader(http.StatusBadRequest) return } @@ -111,7 +107,6 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http if manifest.Tag != imh.Tag { ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag) imh.Errors.Push(v2.ErrorCodeTagInvalid) - w.WriteHeader(http.StatusBadRequest) return } @@ -120,12 +115,10 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http if dgst != imh.Digest { ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest) imh.Errors.Push(v2.ErrorCodeDigestInvalid) - w.WriteHeader(http.StatusBadRequest) return } } else { imh.Errors.Push(v2.ErrorCodeTagInvalid, "no tag or digest specified") - w.WriteHeader(http.StatusBadRequest) return } @@ -152,7 +145,6 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http imh.Errors.PushErr(err) } - w.WriteHeader(http.StatusBadRequest) return } @@ -180,7 +172,6 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h // Once we work out schema version 2, the full deletion system will be // worked out and we can add support back. imh.Errors.Push(v2.ErrorCodeUnsupported) - w.WriteHeader(http.StatusBadRequest) } // digestManifest takes a digest of the given manifest. 
This belongs somewhere diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index be84fae5..44b12dfd 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -39,7 +39,6 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - w.WriteHeader(404) th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()}) default: th.Errors.PushErr(err) From 00b1e8fca06c30abfc0fa1fdf228f3739ec7534b Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Tue, 26 May 2015 17:18:32 -0700 Subject: [PATCH 141/501] Round 2 Make Errors a []Error Signed-off-by: Doug Davis --- docs/api/errcode/errors.go | 62 ++++++++++++--------------------- docs/api/v2/errors.go | 2 +- docs/client/blob_writer_test.go | 19 +++++----- docs/client/errors.go | 7 ++-- docs/client/repository_test.go | 4 +-- docs/handlers/api_test.go | 4 +-- docs/handlers/app.go | 14 ++++---- docs/handlers/app_test.go | 4 +-- docs/handlers/blob.go | 10 +++--- docs/handlers/blobupload.go | 50 +++++++++++++------------- docs/handlers/helpers.go | 4 +-- docs/handlers/images.go | 27 +++++++------- docs/handlers/tags.go | 7 ++-- 13 files changed, 98 insertions(+), 116 deletions(-) diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index ce3c0624..c46670a1 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -55,8 +55,8 @@ var errorDescriptors = []ErrorDescriptor{ } // LoadErrors will register a new set of Errors into the system -func LoadErrors(errs *[]ErrorDescriptor) { - for _, descriptor := range *errs { +func LoadErrors(errs []ErrorDescriptor) { + for _, descriptor := range errs { if _, ok := idToDescriptors[descriptor.Value]; ok { panic(fmt.Sprintf("ErrorValue %s is already registered", descriptor.Value)) } @@ -123,28 +123,28 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` + Code ErrorCode `json:"code"` + Detail interface{} `json:"detail,omitempty"` } // Error returns a human readable representation of the error. func (e Error) Error() string { return fmt.Sprintf("%s: %s", strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) + e.Code.Message()) +} + +// Message returns the human-readable error message for this Error +func (e Error) Message() string { + return e.Code.Message() } // Errors provides the envelope for multiple errors and a few sugar methods // for use within the application. -type Errors struct { - Errors []Error `json:"errors,omitempty"` -} +type Errors []Error -// Push pushes an error on to the error stack, with the optional detail -// argument. It is a programming error (ie panic) to push more than one -// detail at a time. -func (errs *Errors) Push(code ErrorCode, details ...interface{}) { +// NewError creates a new Error struct based on the passed-in info +func NewError(code ErrorCode, details ...interface{}) Error { if len(details) > 1 { panic("please specify zero or one detail items for this error") }
-func (errs *Errors) PushErr(err error) { - switch err.(type) { - case Error: - errs.Errors = append(errs.Errors, err.(Error)) - default: - errs.Errors = append(errs.Errors, Error{Message: err.Error()}) + return Error{ + Code: code, + Detail: detail, } } -func (errs *Errors) Error() string { - switch errs.Len() { +func (errs Errors) Error() string { + switch len(errs) { case 0: return "" case 1: - return errs.Errors[0].Error() + return errs[0].Error() default: msg := "errors:\n" - for _, err := range errs.Errors { + for _, err := range errs { msg += err.Error() + "\n" } return msg } } -// Clear clears the errors. -func (errs *Errors) Clear() { - errs.Errors = nil -} - // Len returns the current number of errors. -func (errs *Errors) Len() int { - return len(errs.Errors) +func (errs Errors) Len() int { + return len(errs) } // init loads the default errors that are part of the errcode package func init() { - LoadErrors(&errorDescriptors) + LoadErrors(errorDescriptors) } diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index fc61549b..9655dba8 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -188,5 +188,5 @@ var errorDescriptors = []errcode.ErrorDescriptor{ // init registers our errors with the errcode system func init() { - errcode.LoadErrors(&errorDescriptors) + errcode.LoadErrors(errorDescriptors) } diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 3fdeb6ee..74545b06 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -86,15 +86,12 @@ func TestUploadReadFrom(t *testing.T) { Response: testutil.Response{ StatusCode: http.StatusBadRequest, Body: []byte(` - { - "errors": [ + [ { "code": "BLOB_UPLOAD_INVALID", - "message": "invalid upload identifier", "detail": "more detail" } - ] - }`), + ] `), }, }, // Test 400 invalid json @@ -162,17 +159,17 @@ func TestUploadReadFrom(t *testing.T) { if err == nil { t.Fatalf("Expected error when not found") } - if uploadErr, ok := err.(*errcode.Errors); !ok { + if uploadErr, ok := err.(errcode.Errors); !ok { t.Fatalf("Wrong error type %T: %s", err, err) - } else if len(uploadErr.Errors) != 1 { - t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr.Errors)) + } else if len(uploadErr) != 1 { + t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) } else { - v2Err := uploadErr.Errors[0] + v2Err := uploadErr[0] if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) } - if expected := "invalid upload identifier"; v2Err.Message != expected { - t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Message, expected) + if expected := "blob upload invalid"; v2Err.Message() != expected { + t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Message(), expected) } if expected := "more detail"; v2Err.Detail.(string) != expected { t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Detail.(string), expected) diff --git a/docs/client/errors.go b/docs/client/errors.go index ef25dddf..e743533b 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -45,7 +45,7 @@ func parseHTTPErrorResponse(r io.Reader) error { Response: body, } } - return &errors + return errors } func handleErrorResponse(resp *http.Response) error { @@ -53,9 +53,8 @@ func handleErrorResponse(resp *http.Response) error { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return &errcode.Error{ - Code: 
v2.ErrorCodeUnauthorized, - Message: "401 Unauthorized", - Detail: uErr.Response, + Code: v2.ErrorCodeUnauthorized, + Detail: uErr.Response, } } return err diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 24946ed5..7dbe97cf 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -676,7 +676,7 @@ func TestManifestUnauthorized(t *testing.T) { if v2Err.Code != v2.ErrorCodeUnauthorized { t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) } - if expected := "401 Unauthorized"; v2Err.Message != expected { - t.Fatalf("Unexpected message value: %s, expected %s", v2Err.Message, expected) + if expected := errcode.ErrorCode(v2.ErrorCodeUnauthorized).Message(); v2Err.Message() != expected { + t.Fatalf("Unexpected message value: %s, expected %s", v2Err.Message(), expected) } } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index c5a99453..146fcf4c 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -760,7 +760,7 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error t.Fatalf("unexpected error decoding error response: %v", err) } - if len(errs.Errors) == 0 { + if len(errs) == 0 { t.Fatalf("expected errors in response") } @@ -780,7 +780,7 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error counts[code] = 0 } - for _, err := range errs.Errors { + for _, err := range errs { if _, ok := expected[err.Code]; !ok { t.Fatalf("unexpected error code %v encountered during %s: %s ", err.Code, msg, string(p)) } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 2747ac8b..12c6e227 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -346,9 +346,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - context.Errors.Push(v2.ErrorCodeNameUnknown, err) + context.Errors = append(context.Errors, errcode.NewError(v2.ErrorCodeNameUnknown, err)) case distribution.ErrRepositoryNameInvalid: - context.Errors.Push(v2.ErrorCodeNameInvalid, err) + context.Errors = append(context.Errors, errcode.NewError(v2.ErrorCodeNameInvalid, err)) } serveJSON(w, context.Errors) @@ -363,7 +363,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors.Push(errcode.ErrorCodeUnknown, err) + context.Errors = append(context.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) serveJSON(w, context.Errors) return @@ -383,9 +383,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } func (app *App) logError(context context.Context, errors errcode.Errors) { - for _, e := range errors.Errors { + for _, e := range errors { c := ctxu.WithValue(context, "err.code", e.Code) - c = ctxu.WithValue(c, "err.message", e.Message) + c = ctxu.WithValue(c, "err.message", e.Code.Message()) c = ctxu.WithValue(c, "err.detail", e.Detail) c = ctxu.WithLogger(c, ctxu.GetLogger(c, "err.code", @@ -441,7 +441,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // proceed. 
var errs errcode.Errors - errs.Push(v2.ErrorCodeUnauthorized) + errs = append(errs, errcode.NewError(v2.ErrorCodeUnauthorized)) serveJSON(w, errs) return fmt.Errorf("forbidden: no repository name") @@ -464,7 +464,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont err.ServeHTTP(w, r) var errs errcode.Errors - errs.Push(v2.ErrorCodeUnauthorized, accessRecords) + errs = append(errs, errcode.NewError(v2.ErrorCodeUnauthorized, accessRecords)) serveJSON(w, errs) default: // This condition is a potential security problem either in diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index da76dc0d..0520cb40 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -203,8 +203,8 @@ func TestNewApp(t *testing.T) { t.Fatalf("error decoding error response: %v", err) } - if errs.Errors[0].Code != v2.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized) + if errs[0].Code != v2.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", errs[0].Code, v2.ErrorCodeUnauthorized) } } diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index 56699fe9..fa9f576a 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -18,12 +18,12 @@ func blobDispatcher(ctx *Context, r *http.Request) http.Handler { if err == errDigestNotAvailable { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) + ctx.Errors = append(ctx.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) + ctx.Errors = append(ctx.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) }) } @@ -53,16 +53,16 @@ func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { desc, err := blobs.Stat(bh, bh.Digest) if err != nil { if err == distribution.ErrBlobUnknown { - bh.Errors.Push(v2.ErrorCodeBlobUnknown, bh.Digest) + bh.Errors = append(bh.Errors, errcode.NewError(v2.ErrorCodeBlobUnknown, bh.Digest)) } else { - bh.Errors.Push(errcode.ErrorCodeUnknown, err) + bh.Errors = append(bh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) } return } if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) - bh.Errors.Push(errcode.ErrorCodeUnknown, err) + bh.Errors = append(bh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 7046edd3..7e8c3962 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -37,7 +37,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) }) } buh.State = state @@ -45,14 +45,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if state.Name != ctx.Repository.Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) - buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + buh.Errors = append(buh.Errors, 
errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) }) } if state.UUID != buh.UUID { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) - buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) }) } @@ -62,12 +62,12 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) if err == distribution.ErrBlobUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown, err)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) }) } buh.Upload = upload @@ -81,14 +81,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { defer upload.Close() ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) upload.Cancel(buh) }) } else if nn != buh.State.Offset { defer upload.Close() ctxu.GetLogger(ctx).Infof("seek to wrong offest: %d != %d", nn, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) upload.Cancel(buh) }) } @@ -119,7 +119,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req blobs := buh.Repository.Blobs(buh) upload, err := blobs.Create(buh) if err != nil { - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } @@ -127,7 +127,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req defer buh.Upload.Close() if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } @@ -138,7 +138,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req // GetUploadStatus returns the status of a given upload, identified by id. func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) return } @@ -146,7 +146,7 @@ func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Req // resumable upload is supported. This will enable returning a non-zero // range for clients to begin uploading at an offset. if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } @@ -157,13 +157,13 @@ func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Req // PatchBlobData writes data to an upload. 
func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) return } ct := r.Header.Get("Content-Type") if ct != "" && ct != "application/octet-stream" { - buh.Errors.Push(errcode.ErrorCodeUnknown, fmt.Errorf("Bad Content-Type")) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, fmt.Errorf("Bad Content-Type"))) // TODO(dmcgowan): encode error return } @@ -173,12 +173,12 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // Copy the data if _, err := io.Copy(buh.Upload, r.Body); err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } if err := buh.blobUploadResponse(w, r, false); err != nil { - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } @@ -192,7 +192,7 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // url of the blob. func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) return } @@ -200,21 +200,21 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht if dgstStr == "" { // no digest? return error, but allow retry. - buh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest missing") + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, "digest missing")) return } dgst, err := digest.ParseDigest(dgstStr) if err != nil { // no digest? return error, but allow retry. - buh.Errors.Push(v2.ErrorCodeDigestInvalid, "digest parsing failed") + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, "digest parsing failed")) return } // Read in the data, if any. 
if _, err := io.Copy(buh.Upload, r.Body); err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } @@ -229,14 +229,14 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht if err != nil { switch err := err.(type) { case distribution.ErrBlobInvalidDigest: - buh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) default: switch err { case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: - buh.Errors.Push(v2.ErrorCodeBlobUploadInvalid, err) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) default: ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) } } @@ -253,7 +253,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // Build our canonical blob url blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) if err != nil { - buh.Errors.Push(errcode.ErrorCodeUnknown, err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } @@ -266,14 +266,14 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // CancelBlobUpload cancels an in-progress upload of a blob. func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) + buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) return } w.Header().Set("Docker-Upload-UUID", buh.UUID) if err := buh.Upload.Cancel(buh); err != nil { ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) - buh.Errors.PushErr(err) + buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) } w.WriteHeader(http.StatusNoContent) diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index 3611a72d..f4f24175 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -14,8 +14,8 @@ func serveJSON(w http.ResponseWriter, v interface{}) error { w.Header().Set("Content-Type", "application/json; charset=utf-8") sc := http.StatusInternalServerError - if errs, ok := v.(errcode.Errors); ok && errs.Len() > 0 { - sc = errs.Errors[0].Code.Descriptor().HTTPStatusCode + if errs, ok := v.(errcode.Errors); ok && len(errs) > 0 { + sc = errs[0].Code.Descriptor().HTTPStatusCode if sc == 0 { sc = http.StatusInternalServerError } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index d717cf72..9d025c78 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -10,6 +10,7 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" "golang.org/x/net/context" @@ -63,7 +64,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http } if err != nil { - imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeManifestUnknown, err)) return } @@ -71,7 +72,7 @@ func (imh *imageManifestHandler) 
GetImageManifest(w http.ResponseWriter, r *http if imh.Digest == "" { dgst, err := digestManifest(imh, sm) if err != nil { - imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) return } @@ -92,13 +93,13 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http var manifest manifest.SignedManifest if err := dec.Decode(&manifest); err != nil { - imh.Errors.Push(v2.ErrorCodeManifestInvalid, err) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeManifestInvalid, err)) return } dgst, err := digestManifest(imh, &manifest) if err != nil { - imh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) return } @@ -106,7 +107,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http if imh.Tag != "" { if manifest.Tag != imh.Tag { ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag) - imh.Errors.Push(v2.ErrorCodeTagInvalid) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeTagInvalid)) return } @@ -114,11 +115,11 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http } else if imh.Digest != "" { if dgst != imh.Digest { ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest) - imh.Errors.Push(v2.ErrorCodeDigestInvalid) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid)) return } } else { - imh.Errors.Push(v2.ErrorCodeTagInvalid, "no tag or digest specified") + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeTagInvalid, "no tag or digest specified")) return } @@ -130,19 +131,19 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http for _, verificationError := range err { switch verificationError := verificationError.(type) { case distribution.ErrManifestBlobUnknown: - imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.Digest) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeBlobUnknown, verificationError.Digest)) case distribution.ErrManifestUnverified: - imh.Errors.Push(v2.ErrorCodeManifestUnverified) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeManifestUnverified)) default: if verificationError == digest.ErrDigestInvalidFormat { - imh.Errors.Push(v2.ErrorCodeDigestInvalid) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid)) } else { - imh.Errors.PushErr(verificationError) + imh.Errors = append(imh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, verificationError)) } } } default: - imh.Errors.PushErr(err) + imh.Errors = append(imh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) } return @@ -171,7 +172,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h // tag index entries a serious problem in eventually consistent storage. // Once we work out schema version 2, the full deletion system will be // worked out and we can add support back. - imh.Errors.Push(v2.ErrorCodeUnsupported) + imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeUnsupported)) } // digestManifest takes a digest of the given manifest. 
This belongs somewhere diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 44b12dfd..e1846cf9 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" ) @@ -39,9 +40,9 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()}) + th.Errors = append(th.Errors, errcode.NewError(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()})) default: - th.Errors.PushErr(err) + th.Errors = append(th.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) } return } @@ -53,7 +54,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { Name: th.Repository.Name(), Tags: tags, }); err != nil { - th.Errors.PushErr(err) + th.Errors = append(th.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) return } } From 589123441b4fdee54af9d5d235adadc94519223a Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Sat, 30 May 2015 18:19:23 -0700 Subject: [PATCH 142/501] fixed typos Signed-off-by: Phil Misiowiec --- docs/handlers/app.go | 2 +- docs/storage/blob_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 1d58e945..561c45cb 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -122,7 +122,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { - ctxu.GetLogger(app).Warnf("unkown cache type %q, caching disabled", configuration.Storage["cache"]) + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) } } } diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 114e686f..569f756d 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -59,7 +59,7 @@ func TestSimpleBlobUpload(t *testing.T) { // Do a resume, get unknown upload blobUpload, err = bs.Resume(ctx, blobUpload.ID()) if err != distribution.ErrBlobUploadUnknown { - t.Fatalf("unexpected error resuming upload, should be unkown: %v", err) + t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) } // Restart! From 9d7c6923c19c2afd3e1553a81dbfbc4d99803765 Mon Sep 17 00:00:00 2001 From: xiekeyang Date: Fri, 17 Apr 2015 20:19:20 +0800 Subject: [PATCH 143/501] Feature: Web Panic Reporting via hooks This PR addresses the "email after registry webapp panic" issue (#41), improving my previous (closed) design. It sets up logging hooks to catch panics in the web application, and sends the email directly from the hook handler, so no new HTTP server or handler is needed. 
Signed-off-by: xiekeyang --- docs/handlers/app.go | 27 +++++++++++++++++++++ docs/handlers/hooks.go | 53 ++++++++++++++++++++++++++++++++++++++++++ docs/handlers/mail.go | 45 +++++++++++++++++++++++++++++++++++ 3 files changed, 125 insertions(+) create mode 100644 docs/handlers/hooks.go create mode 100644 docs/handlers/mail.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 1d58e945..77e3a956 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -9,6 +9,7 @@ import ( "os" "time" + log "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" @@ -101,6 +102,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.configureEvents(&configuration) app.configureRedis(&configuration) + app.configureLogHook(&configuration) // configure storage caches if cc, ok := configuration.Storage["cache"]; ok { @@ -291,6 +293,31 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { })) } +// configureLogHook prepares logging hook parameters. +func (app *App) configureLogHook(configuration *configuration.Configuration) { + logger := ctxu.GetLogger(app).(*log.Entry).Logger + for _, configHook := range configuration.Log.Hooks { + if !configHook.Disabled { + switch configHook.Type { + case "mail": + hook := &logHook{} + hook.LevelsParam = configHook.Levels + hook.Mail = &mailer{ + Addr: configHook.MailOptions.SMTP.Addr, + Username: configHook.MailOptions.SMTP.Username, + Password: configHook.MailOptions.SMTP.Password, + Insecure: configHook.MailOptions.SMTP.Insecure, + From: configHook.MailOptions.From, + To: configHook.MailOptions.To, + } + logger.Hooks.Add(hook) + default: + } + } + } + app.Context = ctxu.WithLogger(app.Context, logger) +} + func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. 
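For illustration, here is a minimal sketch of how the configureLogHook wiring above behaves at runtime. It assumes the logHook and mailer types added below in hooks.go and mail.go; the SMTP address, sender, and recipients are made-up placeholders, not values from this patch:

	logger := log.New() // logrus, imported as in app.go above
	hook := &logHook{
		// Level names are parsed by the hook's Levels() via logrus.ParseLevel.
		LevelsParam: []string{"panic"},
		Mail: &mailer{
			Addr: "smtp.example.com:25",       // placeholder SMTP endpoint
			From: "registry@example.com",      // placeholder sender
			To:   []string{"ops@example.com"}, // placeholder recipients
		},
	}
	logger.Hooks.Add(hook)
	// Any entry logged at a hooked level now invokes logHook.Fire, which
	// renders the entry through the mail-body template and calls sendMail.
	// (logrus still panics after the hooks have fired.)
	logger.Panic("registry web application panicked")
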
diff --git a/docs/handlers/hooks.go b/docs/handlers/hooks.go new file mode 100644 index 00000000..7bbab4f8 --- /dev/null +++ b/docs/handlers/hooks.go @@ -0,0 +1,53 @@ +package handlers + +import ( + "bytes" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/Sirupsen/logrus" +) + +// logHook is for hooking panics in the web application +type logHook struct { + LevelsParam []string + Mail *mailer +} + +// Fire formats the log entry and sends it out via mail +func (hook *logHook) Fire(entry *logrus.Entry) error { + addr := strings.Split(hook.Mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) + + html := ` + {{.Message}} + + {{range $key, $value := .Data}} + {{$key}}: {{$value}} + {{end}} + ` + b := bytes.NewBuffer(make([]byte, 0)) + t := template.Must(template.New("mail body").Parse(html)) + if err := t.Execute(b, entry); err != nil { + return err + } + body := fmt.Sprintf("%s", b) + + return hook.Mail.sendMail(subject, body) +} + +// Levels contains the hook levels to be caught +func (hook *logHook) Levels() []logrus.Level { + levels := []logrus.Level{} + for _, v := range hook.LevelsParam { + lv, _ := logrus.ParseLevel(v) + levels = append(levels, lv) + } + return levels +} diff --git a/docs/handlers/mail.go b/docs/handlers/mail.go new file mode 100644 index 00000000..39244909 --- /dev/null +++ b/docs/handlers/mail.go @@ -0,0 +1,45 @@ +package handlers + +import ( + "errors" + "net/smtp" + "strings" +) + +// mailer provides fields of email configuration for sending. +type mailer struct { + Addr, Username, Password, From string + Insecure bool + To []string +} + +// sendMail allows users to send email, but only if the mail parameters are configured correctly. 
+func (mail *mailer) sendMail(subject, message string) error { + addr := strings.Split(mail.Addr, ":") + if len(addr) != 2 { + return errors.New("Invalid Mail Address") + } + host := addr[0] + msg := []byte("To:" + strings.Join(mail.To, ";") + + "\r\nFrom: " + mail.From + + "\r\nSubject: " + subject + + "\r\nContent-Type: text/plain\r\n\r\n" + + message) + auth := smtp.PlainAuth( + "", + mail.Username, + mail.Password, + host, + ) + err := smtp.SendMail( + mail.Addr, + auth, + mail.From, + mail.To, + []byte(msg), + ) + if err != nil { + return err + } + return nil +} From 4bc53818cb1423f19a9449c8979f467aea816819 Mon Sep 17 00:00:00 2001 From: Vincent Giersch Date: Mon, 1 Jun 2015 09:57:40 +0000 Subject: [PATCH 144/501] Fix rados build, remove uuid dependency Signed-off-by: Vincent Giersch --- docs/storage/driver/rados/rados.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go index 999b06b0..9bac8fc3 100644 --- a/docs/storage/driver/rados/rados.go +++ b/docs/storage/driver/rados/rados.go @@ -9,12 +9,12 @@ import ( "path" "strconv" - "code.google.com/p/go-uuid/uuid" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/uuid" "github.com/noahdesu/go-ceph/rados" ) @@ -497,7 +497,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int // Generate a blob identifier func (d *driver) generateOid() string { - return objectBlobPrefix + uuid.New() + return objectBlobPrefix + uuid.Generate().String() } // Reference a object and its hierarchy From 040d7038b88ae8114d21bb9b6ff705372b1c22ee Mon Sep 17 00:00:00 2001 From: Vincent Giersch Date: Mon, 1 Jun 2015 15:14:03 +0000 Subject: [PATCH 145/501] doc: coherence between requests and parameters + typo In the request parameter lists, `tag` was used instead of the `reference` parameter present in the HTTP request paths Signed-off-by: Vincent Giersch --- docs/api/v2/descriptors.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index d7c4a880..e08c1324 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -16,12 +16,12 @@ var ( Description: `Name of the target repository.`, } - tagParameterDescriptor = ParameterDescriptor{ - Name: "tag", + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", Type: "string", Format: TagNameRegexp.String(), Required: true, - Description: `Tag of the target manifiest.`, + Description: `Tag or digest of the target manifest.`, } uuidParameterDescriptor = ParameterDescriptor{ @@ -476,7 +476,7 @@ var routeDescriptors = []RouteDescriptor{ }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, - tagParameterDescriptor, + referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { @@ -542,7 +542,7 @@ var routeDescriptors = []RouteDescriptor{ }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, - tagParameterDescriptor, + referenceParameterDescriptor, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -648,7 +648,7 @@ var routeDescriptors = []RouteDescriptor{ }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, - tagParameterDescriptor, + referenceParameterDescriptor, }, Successes: 
[]ResponseDescriptor{ { From b8b16b78f4fe510e4f0b9310957aba6675bcd623 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Tue, 26 May 2015 18:16:45 -0700 Subject: [PATCH 146/501] Round 3 - Add Register function Signed-off-by: Doug Davis --- docs/api/errcode/errors.go | 97 ++++++++++-------- docs/api/errcode/errors_test.go | 156 ++++++++++++----------------- docs/api/v2/descriptors.go | 5 - docs/api/v2/errors.go | 168 ++++++++++++-------------------- docs/handlers/app.go | 1 + docs/handlers/app_test.go | 8 +- docs/handlers/helpers.go | 3 +- 7 files changed, 187 insertions(+), 251 deletions(-) diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index c46670a1..4285dedc 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" "strings" + "sync" ) // ErrorCode represents the error type. The errors are serialized via strings @@ -36,49 +37,70 @@ type ErrorDescriptor struct { var ( errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} ) -const ( - // ErrorCodeUnknown is a catch-all for errors not defined below. - ErrorCodeUnknown ErrorCode = 10000 + iota -) - -var errorDescriptors = []ErrorDescriptor{ - { - Code: ErrorCodeUnknown, - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an +// ErrorCodeUnknown is a generic error that can be used as a last +// resort if there is no situation-specific error message that can be used +var ErrorCodeUnknown = Register("registry.api.errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }, -} + HTTPStatusCode: http.StatusInternalServerError, +}) -// LoadErrors will register a new set of Errors into the system -func LoadErrors(errs []ErrorDescriptor) { - for _, descriptor := range errs { - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %s is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %d is already registered", descriptor.Code)) - } +var nextCode = 1000 +var registerLock sync.Mutex - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + code := ErrorCode(nextCode) + + descriptor.Code = code + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %s is already registered", descriptor.Value)) } -} - -// ParseErrorCode attempts to parse the error code string, returning -// ErrorCodeUnknown if the error is not known. -func ParseErrorCode(s string) ErrorCode { - desc, ok := idToDescriptors[s] - - if !ok { - return ErrorCodeUnknown + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %d is already registered", descriptor.Code)) } - return desc.Code + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return code +} + +// ParseErrorCode returns the value by the string error code. 
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// GetGroupNames returns the list of Error group names that are registered
+func GetGroupNames() []string {
+	keys := []string{}
+
+	for k := range groupToDescriptors {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// GetErrorCodeGroup returns the named group of error descriptors
+func GetErrorCodeGroup(name string) []ErrorDescriptor {
+	return groupToDescriptors[name]
 }
 
 // Descriptor returns the descriptor for the error code.
@@ -183,8 +205,3 @@ func (errs Errors) Error() string {
 func (errs Errors) Len() int {
 	return len(errs)
 }
-
-// init loads the default errors that are part of the errcode package
-func init() {
-	LoadErrors(errorDescriptors)
-}
diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go
index eedb22ed..aaf0d73b 100644
--- a/docs/api/errcode/errors_test.go
+++ b/docs/api/errcode/errors_test.go
@@ -2,67 +2,86 @@ package errcode
 
 import (
 	"encoding/json"
-	// "reflect"
+	"net/http"
+	"reflect"
 	"testing"
-
-	// "github.com/docker/distribution/digest"
 )
 
 // TestErrorCodes ensures that error code format, mappings and
 // marshaling/unmarshaling round trips are stable.
 func TestErrorCodes(t *testing.T) {
-	for _, desc := range errorDescriptors {
-		if desc.Code.String() != desc.Value {
-			t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value)
+	if len(errorCodeToDescriptors) == 0 {
+		t.Fatal("errors aren't loaded!")
+	}
+
+	for ec, desc := range errorCodeToDescriptors {
+		if ec != desc.Code {
+			t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code)
 		}
 
-		if desc.Code.Message() != desc.Message {
-			t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message)
+		if idToDescriptors[desc.Value].Code != ec {
+			t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec)
 		}
 
-		// Serialize the error code using the json library to ensure that we
-		// get a string and it works round trip.
-		p, err := json.Marshal(desc.Code)
+		if ec.Message() != desc.Message {
+			t.Fatalf("ec.Message doesn't match desc.Message: %q != %q", ec.Message(), desc.Message)
+		}
 
+		// Test (de)serializing the ErrorCode
+		p, err := json.Marshal(ec)
 		if err != nil {
-			t.Fatalf("error marshaling error code %v: %v", desc.Code, err)
+			t.Fatalf("couldn't marshal ec %v: %v", ec, err)
 		}
 
 		if len(p) <= 0 {
-			t.Fatalf("expected content in marshaled before for error code %v", desc.Code)
+			t.Fatalf("expected content in marshaled form for error code %v", ec)
 		}
 
 		// First, unmarshal to interface and ensure we have a string.
 		var ecUnspecified interface{}
 		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
-			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
+			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
 		}
 
 		if _, ok := ecUnspecified.(string); !ok {
-			t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified)
+			t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified)
 		}
 
 		// Now, unmarshal with the error code type and ensure they are equal
 		var ecUnmarshaled ErrorCode
 		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
-			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
+			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
 		}
 
-		if ecUnmarshaled != desc.Code {
-			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code)
+		if ecUnmarshaled != ec {
+			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec)
 		}
 	}
+
 }
 
 // TestErrorsManagement does a quick check of the Errors type to ensure that
 // members are properly pushed and marshaled.
-/*
+var ErrorCodeTest1 = Register("v2.errors", ErrorDescriptor{
+	Value:          "TEST1",
+	Message:        "test error 1",
+	Description:    `Just a test message #1.`,
+	HTTPStatusCode: http.StatusInternalServerError,
+})
+
+var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{
+	Value:          "TEST2",
+	Message:        "test error 2",
+	Description:    `Just a test message #2.`,
+	HTTPStatusCode: http.StatusNotFound,
+})
+
 func TestErrorsManagement(t *testing.T) {
 	var errs Errors
 
-	errs.Push(ErrorCodeDigestInvalid)
-	errs.Push(ErrorCodeBlobUnknown,
-		map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"})
+	errs = append(errs, NewError(ErrorCodeTest1))
+	errs = append(errs, NewError(ErrorCodeTest2,
+		map[string]interface{}{"digest": "sometestblobsumdoesntmatter"}))
 
 	p, err := json.Marshal(errs)
 
@@ -70,15 +89,25 @@ func TestErrorsManagement(t *testing.T) {
 		t.Fatalf("error marshaling errors: %v", err)
 	}
 
-	expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}"
+	expectedJSON := "[{\"code\":\"TEST1\"},{\"code\":\"TEST2\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]"
 
 	if string(p) != expectedJSON {
 		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
 	}
 
-	errs.Clear()
-	errs.Push(ErrorCodeUnknown)
-	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
+	// Now test the reverse
+	var unmarshaled Errors
+	if err := json.Unmarshal(p, &unmarshaled); err != nil {
+		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
+	}
+
+	if !reflect.DeepEqual(unmarshaled, errs) {
+		t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs)
+	}
+
+	// Test again with a single value this time
+	errs = Errors{NewError(ErrorCodeUnknown)}
+	expectedJSON = "[{\"code\":\"UNKNOWN\"}]"
 
 	p, err = json.Marshal(errs)
 	if err != nil {
@@ -88,80 +117,15 @@ func TestErrorsManagement(t *testing.T) {
 	if string(p) != expectedJSON {
 		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
 	}
-}
 
-// TestMarshalUnmarshal ensures that api errors can round trip through json
-// without losing information.
-func TestMarshalUnmarshal(t *testing.T) { - - var errors Errors - - for _, testcase := range []struct { - description string - err Error - }{ - { - description: "unknown error", - err: Error{ - - Code: ErrorCodeUnknown, - Message: ErrorCodeUnknown.Descriptor().Message, - }, - }, - { - description: "unknown manifest", - err: Error{ - Code: ErrorCodeManifestUnknown, - Message: ErrorCodeManifestUnknown.Descriptor().Message, - }, - }, - { - description: "unknown manifest", - err: Error{ - Code: ErrorCodeBlobUnknown, - Message: ErrorCodeBlobUnknown.Descriptor().Message, - Detail: map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"}, - }, - }, - } { - fatalf := func(format string, args ...interface{}) { - t.Fatalf(testcase.description+": "+format, args...) - } - - unexpectedErr := func(err error) { - fatalf("unexpected error: %v", err) - } - - p, err := json.Marshal(testcase.err) - if err != nil { - unexpectedErr(err) - } - - var unmarshaled Error - if err := json.Unmarshal(p, &unmarshaled); err != nil { - unexpectedErr(err) - } - - if !reflect.DeepEqual(unmarshaled, testcase.err) { - fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err) - } - - // Roll everything up into an error response envelope. - errors.PushErr(testcase.err) - } - - p, err := json.Marshal(errors) - if err != nil { - t.Fatalf("unexpected error marshaling error envelope: %v", err) - } - - var unmarshaled Errors + // Now test the reverse + unmarshaled = nil if err := json.Unmarshal(p, &unmarshaled); err != nil { t.Fatalf("unexpected error unmarshaling error envelope: %v", err) } - if !reflect.DeepEqual(unmarshaled, errors) { - t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors) + if !reflect.DeepEqual(unmarshaled, errs) { + t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) } + } -*/ diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index db5a9270..d90bbb09 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -172,13 +172,8 @@ const ( var APIDescriptor = struct { // RouteDescriptors provides a list of the routes available in the API. RouteDescriptors []RouteDescriptor - - // ErrorDescriptors provides a list of the error codes and their - // associated documentation and metadata. - ErrorDescriptors []errcode.ErrorDescriptor }{ RouteDescriptors: routeDescriptors, - ErrorDescriptors: errorDescriptors, } // RouteDescriptor describes a route specified by name. diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index 9655dba8..c12cbc1c 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -6,81 +6,28 @@ import ( "github.com/docker/distribution/registry/api/errcode" ) -const ( +var ( // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = iota - - // ErrorCodeUnauthorized is returned if a request is not authorized. - ErrorCodeUnauthorized - - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - // size does not match the content length. - ErrorCodeSizeInvalid - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. 
- ErrorCodeTagInvalid - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. - ErrorCodeManifestInvalid - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verfication. - ErrorCodeManifestUnverified - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid -) - -// ErrorDescriptors provides a list of HTTP API Error codes that may be -// encountered when interacting with the registry API. -var errorDescriptors = []errcode.ErrorDescriptor{ - { - Code: ErrorCodeUnsupported, + ErrorCodeUnsupported = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "UNSUPPORTED", Message: "The operation is unsupported.", Description: `The operation was unsupported due to a missing implementation or invalid set of parameters.`, - }, - { - Code: ErrorCodeUnauthorized, + }) + + // ErrorCodeUnauthorized is returned if a request is not authorized. + ErrorCodeUnauthorized = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "UNAUTHORIZED", Message: "access to the requested resource is not authorized", Description: `The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status.`, HTTPStatusCode: http.StatusForbidden, - }, - { - Code: ErrorCodeDigestInvalid, + }) + + // ErrorCodeDigestInvalid is returned when uploading a blob if the + // provided digest does not match the blob contents. + ErrorCodeDigestInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "DIGEST_INVALID", Message: "provided digest did not match uploaded content", Description: `When a blob is uploaded, the registry will check that @@ -89,50 +36,60 @@ var errorDescriptors = []errcode.ErrorDescriptor{ invalid digest string. This error may also be returned when a manifest includes an invalid layer digest.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeSizeInvalid, + }) + + // ErrorCodeSizeInvalid is returned when uploading a blob if the provided + ErrorCodeSizeInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "SIZE_INVALID", Message: "provided length did not match content length", Description: `When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeNameInvalid, + }) + + // ErrorCodeNameInvalid is returned when the name in the manifest does not + // match the provided name. 
+ ErrorCodeNameInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "NAME_INVALID", Message: "invalid repository name", Description: `Invalid repository name encountered either during manifest validation or any API operation.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeTagInvalid, + }) + + // ErrorCodeTagInvalid is returned when the tag in the manifest does not + // match the provided tag. + ErrorCodeTagInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "TAG_INVALID", Message: "manifest tag did not match URI", Description: `During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeNameUnknown, + }) + + // ErrorCodeNameUnknown when the repository name is not known. + ErrorCodeNameUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "NAME_UNKNOWN", Message: "repository name not known to registry", Description: `This is returned if the name used during an operation is unknown to the registry.`, HTTPStatusCode: http.StatusNotFound, - }, - { - Code: ErrorCodeManifestUnknown, + }) + + // ErrorCodeManifestUnknown returned when image manifest is unknown. + ErrorCodeManifestUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "MANIFEST_UNKNOWN", Message: "manifest unknown", Description: `This error is returned when the manifest, identified by name and tag is unknown to the repository.`, HTTPStatusCode: http.StatusNotFound, - }, - { - Code: ErrorCodeManifestInvalid, + }) + + // ErrorCodeManifestInvalid returned when an image manifest is invalid, + // typically during a PUT operation. This error encompasses all errors + // encountered during manifest validation that aren't signature errors. + ErrorCodeManifestInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "MANIFEST_INVALID", Message: "manifest invalid", Description: `During upload, manifests undergo several checks ensuring @@ -140,25 +97,32 @@ var errorDescriptors = []errcode.ErrorDescriptor{ more specific error is included. The detail will contain information the failed validation.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeManifestUnverified, + }) + + // ErrorCodeManifestUnverified is returned when the manifest fails + // signature verfication. + ErrorCodeManifestUnverified = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature verification, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeManifestBlobUnknown, + }) + + // ErrorCodeManifestBlobUnknown is returned when a manifest blob is + // unknown to the registry. + ErrorCodeManifestBlobUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "MANIFEST_BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a manifest blob is unknown to the registry.`, HTTPStatusCode: http.StatusBadRequest, - }, - { - Code: ErrorCodeBlobUnknown, + }) + + // ErrorCodeBlobUnknown is returned when a blob is unknown to the + // registry. This can happen when the manifest references a nonexistent + // layer or the result is not found by a blob fetch. 
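+	// (Editor's note, not in the patch: MANIFEST_BLOB_UNKNOWN above carries
+	// http.StatusBadRequest because it surfaces while validating an uploaded
+	// manifest, whereas BLOB_UNKNOWN below carries http.StatusNotFound for
+	// direct blob fetches.)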
+ ErrorCodeBlobUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a blob is unknown to the @@ -166,27 +130,23 @@ var errorDescriptors = []errcode.ErrorDescriptor{ standard get or if a manifest references an unknown layer during upload.`, HTTPStatusCode: http.StatusNotFound, - }, + }) - { - Code: ErrorCodeBlobUploadUnknown, + // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. + ErrorCodeBlobUploadUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_UNKNOWN", Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never started, this error code may be returned.`, HTTPStatusCode: http.StatusNotFound, - }, - { - Code: ErrorCodeBlobUploadInvalid, + }) + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. + ErrorCodeBlobUploadInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_INVALID", Message: "blob upload invalid", Description: `The blob upload encountered an error and can no longer proceed.`, HTTPStatusCode: http.StatusNotFound, - }, -} - -// init registers our errors with the errcode system -func init() { - errcode.LoadErrors(errorDescriptors) -} + }) +) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 12c6e227..0ef7d4ca 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -452,6 +452,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont if err != nil { switch err := err.(type) { case auth.Challenge: + // NOTE(duglin): // Since err.ServeHTTP will set the HTTP status code for us // we need to set the content-type here. The serveJSON // func will try to do it but it'll be too late at that point. 
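[Editor's aside, not part of the patch: a minimal sketch of how a package is expected to consume the Register API introduced above. The "example.api" group and the WIDGET_UNKNOWN code are invented for illustration; Register, ParseErrorCode and GetErrorCodeGroup are the functions added by this commit.]

package example

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// Codes are declared once as package-level vars; Register panics on a
// duplicate Value, so collisions surface at init time.
var ErrorCodeWidgetUnknown = errcode.Register("example.api", errcode.ErrorDescriptor{
	Value:          "WIDGET_UNKNOWN",
	Message:        "widget unknown to server",
	Description:    `Returned when a request references a widget that does not exist.`,
	HTTPStatusCode: http.StatusNotFound,
})

func demo() {
	// The serialized string form round-trips back to the same code...
	if errcode.ParseErrorCode("WIDGET_UNKNOWN") == ErrorCodeWidgetUnknown {
		fmt.Println("round trip ok")
	}

	// ...and documentation tooling can enumerate everything in a group.
	for _, desc := range errcode.GetErrorCodeGroup("example.api") {
		fmt.Println(desc.Value, desc.HTTPStatusCode)
	}
}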
diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 0520cb40..d98ae400 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -186,11 +186,9 @@ func TestNewApp(t *testing.T) { t.Fatalf("unexpected status code during request: %v", err) } - /* - if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { - t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") - } - */ + if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { + t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") + } expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index f4f24175..656d2066 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -2,9 +2,10 @@ package handlers import ( "encoding/json" - "github.com/docker/distribution/registry/api/errcode" "io" "net/http" + + "github.com/docker/distribution/registry/api/errcode" ) // serveJSON marshals v and sets the content-type header to From 630334b304de82b0f8694ed9d8599579933a8d8c Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 4 Jun 2015 16:12:35 -0700 Subject: [PATCH 147/501] Add more repository name validation test cases Signed-off-by: Stephen J Day --- docs/api/v2/names_test.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index d1dd2b48..0975fb7c 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -73,6 +73,42 @@ func TestRepositoryNameRegexp(t *testing.T) { input: strings.Repeat("a", 256), err: ErrRepositoryNameLong, }, + { + input: "-foo/bar", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "foo/bar-", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "foo-/bar", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "foo/-bar", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "_foo/bar", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "foo/bar_", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "____/____", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "_docker/_docker", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "docker_/docker_", + err: ErrRepositoryNameComponentInvalid, + }, } { failf := func(format string, v ...interface{}) { From 60967cbd6b68a7c839b3e73cf0871c279c79d601 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 4 Jun 2015 18:59:08 -0700 Subject: [PATCH 148/501] Clarify that manifests can only be deleted by digest Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index e08c1324..7020fed9 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -639,7 +639,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Description: "Delete the manifest identified by `name` and `reference`. 
Note that a manifest can _only_ be deleted by `digest`.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -657,8 +657,8 @@ var routeDescriptors = []RouteDescriptor{ }, Failures: []ResponseDescriptor{ { - Name: "Invalid Name or Tag", - Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.", + Name: "Invalid Name or Reference", + Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, ErrorCodes: []ErrorCode{ ErrorCodeNameInvalid, @@ -690,7 +690,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: "Unknown Manifest", - Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", + Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", StatusCode: http.StatusNotFound, ErrorCodes: []ErrorCode{ ErrorCodeNameUnknown, From 60262521bd2122e1b98554587891d88baa557d29 Mon Sep 17 00:00:00 2001 From: BadZen Date: Tue, 21 Apr 2015 19:57:12 +0000 Subject: [PATCH 149/501] Implementation of a basic authentication scheme using standard .htpasswd files Signed-off-by: BadZen Signed-off-by: Dave Trombley --- docs/auth/basic/access.go | 112 +++++++++++++++++++++++++++++++++ docs/auth/basic/access_test.go | 100 +++++++++++++++++++++++++++++ docs/auth/basic/htpasswd.go | 49 +++++++++++++++ 3 files changed, 261 insertions(+) create mode 100644 docs/auth/basic/access.go create mode 100644 docs/auth/basic/access_test.go create mode 100644 docs/auth/basic/htpasswd.go diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go new file mode 100644 index 00000000..1833296a --- /dev/null +++ b/docs/auth/basic/access.go @@ -0,0 +1,112 @@ +// Package basic provides a simple authentication scheme that checks for the +// user credential hash in an htpasswd formatted file in a configuration-determined +// location. +// +// The use of SHA hashes (htpasswd -s) is enforced since MD5 is insecure and simple +// system crypt() may be as well. +// +// This authentication method MUST be used under TLS, as simple token-replay attack is possible. 
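+//
+// (Editor's note, not in the original patch: the controller registers itself
+// under the name "basic", and newAccessController requires exactly two
+// options, "realm" and "path", so a registry configuration would select it
+// with something like:
+//
+//	auth:
+//	  basic:
+//	    realm: registry.example.com
+//	    path: /path/to/.htpasswd
+//
+// The YAML shape here is an assumption for illustration.)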
+ +package basic + +import ( + "encoding/base64" + "errors" + "fmt" + "net/http" + "strings" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +type accessController struct { + realm string + htpasswd *HTPasswd +} + +type challenge struct { + realm string + err error +} + +var _ auth.AccessController = &accessController{} +var ( + ErrPasswordRequired = errors.New("authorization credential required") + ErrInvalidCredential = errors.New("invalid authorization credential") +) + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for basic access controller`) + } + + path, present := options["path"] + if _, ok := path.(string); !present || !ok { + return nil, fmt.Errorf(`"path" must be set for basic access controller`) + } + + return &accessController{realm: realm.(string), htpasswd: NewHTPasswd(path.(string))}, nil +} + +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := ctxu.GetRequest(ctx) + if err != nil { + return nil, err + } + + authHeader := req.Header.Get("Authorization") + + if authHeader == "" { + challenge := challenge{ + realm: ac.realm, + } + return nil, &challenge + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + challenge := challenge{ + realm: ac.realm, + } + + if len(parts) != 2 || strings.ToLower(parts[0]) != "basic" { + challenge.err = ErrPasswordRequired + return nil, &challenge + } + + text, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + challenge.err = ErrInvalidCredential + return nil, &challenge + } + + credential := strings.Split(string(text), ":") + if len(credential) != 2 { + challenge.err = ErrInvalidCredential + return nil, &challenge + } + + if res, _ := ac.htpasswd.AuthenticateUser(credential[0], credential[1]); !res { + challenge.err = ErrInvalidCredential + return nil, &challenge + } + + return auth.WithUser(ctx, auth.UserInfo{Name: credential[0]}), nil +} + +func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { + header := fmt.Sprintf("Realm realm=%q", ch.realm) + w.Header().Set("WWW-Authenticate", header) + w.WriteHeader(http.StatusUnauthorized) +} + +func (ch *challenge) Error() string { + return fmt.Sprintf("basic authentication challenge: %#v", ch) +} + +func init() { + auth.Register("basic", auth.InitFunc(newAccessController)) +} diff --git a/docs/auth/basic/access_test.go b/docs/auth/basic/access_test.go new file mode 100644 index 00000000..d82573b9 --- /dev/null +++ b/docs/auth/basic/access_test.go @@ -0,0 +1,100 @@ +package basic + +import ( + "encoding/base64" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/registry/auth" + "golang.org/x/net/context" +) + +func TestBasicAccessController(t *testing.T) { + + testRealm := "The-Shire" + testUser := "bilbo" + testHtpasswdContent := "bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=" + + tempFile, err := ioutil.TempFile("", "htpasswd-test") + if err != nil { + t.Fatal("could not create temporary htpasswd file") + } + if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { + t.Fatal("could not write temporary htpasswd file") + } + + options := map[string]interface{}{ + "realm": testRealm, + "path": tempFile.Name(), + } + + accessController, err := 
newAccessController(options) + if err != nil { + t.Fatal("error creating access controller") + } + + tempFile.Close() + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(nil, "http.request", r) + authCtx, err := accessController.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.ServeHTTP(w, r) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("basic accessController did not set auth.user context") + } + + if userInfo.Name != testUser { + t.Fatalf("expected user name %q, got %q", testUser, userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + client := &http.Client{ + CheckRedirect: nil, + } + + req, _ := http.NewRequest("GET", server.URL, nil) + resp, err := client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + req, _ = http.NewRequest("GET", server.URL, nil) + + sekrit := "bilbo:baggins" + credential := "Basic " + base64.StdEncoding.EncodeToString([]byte(sekrit)) + + req.Header.Set("Authorization", credential) + resp, err = client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v", resp.StatusCode, http.StatusNoContent) + } + +} diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go new file mode 100644 index 00000000..6833bc5c --- /dev/null +++ b/docs/auth/basic/htpasswd.go @@ -0,0 +1,49 @@ +package basic + +import ( + "crypto/sha1" + "encoding/base64" + "encoding/csv" + "errors" + "os" +) + +var ErrSHARequired = errors.New("htpasswd file must use SHA (htpasswd -s)") + +type HTPasswd struct { + path string + reader *csv.Reader +} + +func NewHTPasswd(htpath string) *HTPasswd { + return &HTPasswd{path: htpath} +} + +func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error) { + + // Hash the credential. + sha := sha1.New() + sha.Write([]byte(pwd)) + hash := base64.StdEncoding.EncodeToString(sha.Sum(nil)) + + // Open the file. + in, err := os.Open(htpasswd.path) + if err != nil { + return false, err + } + + // Parse the contents of the standard .htpasswd until we hit the end or find a match. 
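+	// (Editor's note: entries are parsed as ':'-separated records, one
+	// "user:credential" pair per line, with '#' lines treated as comments.
+	// Because the file is opened on every call, password changes take
+	// effect without restarting the registry.)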
+ reader := csv.NewReader(in) + reader.Comma = ':' + reader.Comment = '#' + reader.TrimLeadingSpace = true + for entry, readerr := reader.Read(); entry != nil || readerr != nil; entry, readerr = reader.Read() { + if entry[0] == user { + if len(entry[1]) < 6 || entry[1][0:5] != "{SHA}" { + return false, ErrSHARequired + } + return entry[1][5:] == hash, nil + } + } + return false, nil +} From 7733b6c892ebf15b603385152ba0a526c5c2af94 Mon Sep 17 00:00:00 2001 From: Dave Trombley Date: Wed, 22 Apr 2015 14:35:59 +0000 Subject: [PATCH 150/501] Fixed WWW-Authenticate: header, added example config and import into main, fixed golint warnings Signed-off-by: Dave Trombley --- docs/auth/basic/access.go | 5 +++-- docs/auth/basic/htpasswd.go | 5 +++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go index 1833296a..76f036c0 100644 --- a/docs/auth/basic/access.go +++ b/docs/auth/basic/access.go @@ -6,7 +6,6 @@ // system crypt() may be as well. // // This authentication method MUST be used under TLS, as simple token-replay attack is possible. - package basic import ( @@ -33,7 +32,9 @@ type challenge struct { var _ auth.AccessController = &accessController{} var ( + // ErrPasswordRequired - returned when no auth token is given. ErrPasswordRequired = errors.New("authorization credential required") + // ErrInvalidCredential - returned when the auth token does not authenticate correctly. ErrInvalidCredential = errors.New("invalid authorization credential") ) @@ -98,7 +99,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut } func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { - header := fmt.Sprintf("Realm realm=%q", ch.realm) + header := fmt.Sprintf("Basic realm=%q", ch.realm) w.Header().Set("WWW-Authenticate", header) w.WriteHeader(http.StatusUnauthorized) } diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go index 6833bc5c..36eca347 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/basic/htpasswd.go @@ -8,17 +8,22 @@ import ( "os" ) +// ErrSHARequired - returned in error field of challenge when the htpasswd was not made using SHA1 algorithm. +// (SHA1 is considered obsolete but the alternative for htpasswd is MD5, or system crypt...) var ErrSHARequired = errors.New("htpasswd file must use SHA (htpasswd -s)") +// HTPasswd - holds a path to a system .htpasswd file and the machinery to parse it. type HTPasswd struct { path string reader *csv.Reader } +// NewHTPasswd - Create a new HTPasswd with the given path to .htpasswd file. func NewHTPasswd(htpath string) *HTPasswd { return &HTPasswd{path: htpath} } +// AuthenticateUser - Check a given user:password credential against the receiving HTPasswd's file. func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error) { // Hash the credential. From d2b7988b7f18568112ae65f4382d40e0f7388114 Mon Sep 17 00:00:00 2001 From: Dave Trombley Date: Wed, 22 Apr 2015 15:13:48 +0000 Subject: [PATCH 151/501] Aligned formatting with gofmt Signed-off-by: Dave Trombley --- docs/auth/basic/access.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go index 76f036c0..dd792374 100644 --- a/docs/auth/basic/access.go +++ b/docs/auth/basic/access.go @@ -33,8 +33,8 @@ type challenge struct { var _ auth.AccessController = &accessController{} var ( // ErrPasswordRequired - returned when no auth token is given. 
- ErrPasswordRequired = errors.New("authorization credential required") - // ErrInvalidCredential - returned when the auth token does not authenticate correctly. + ErrPasswordRequired = errors.New("authorization credential required") + // ErrInvalidCredential - returned when the auth token does not authenticate correctly. ErrInvalidCredential = errors.New("invalid authorization credential") ) From ff67393b2b47608c654559fdd51d3c3fe9ee2b5c Mon Sep 17 00:00:00 2001 From: Dave Trombley Date: Thu, 4 Jun 2015 11:46:34 -0400 Subject: [PATCH 152/501] Added support for bcrypt, plaintext; extension points for other htpasswd hash methods. Signed-off-by: Dave Trombley --- docs/auth/basic/access.go | 38 ++++---------- docs/auth/basic/access_test.go | 48 ++++++++++------- docs/auth/basic/htpasswd.go | 95 ++++++++++++++++++++++++++++++---- 3 files changed, 123 insertions(+), 58 deletions(-) diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go index dd792374..81a22b40 100644 --- a/docs/auth/basic/access.go +++ b/docs/auth/basic/access.go @@ -9,11 +9,9 @@ package basic import ( - "encoding/base64" "errors" "fmt" "net/http" - "strings" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" @@ -58,8 +56,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, err } - authHeader := req.Header.Get("Authorization") - + authHeader := req.Header.Get("Authorization") if authHeader == "" { challenge := challenge{ realm: ac.realm, @@ -67,35 +64,20 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, &challenge } - parts := strings.Split(req.Header.Get("Authorization"), " ") - - challenge := challenge{ - realm: ac.realm, + user, pass, ok := req.BasicAuth() + if !ok { + return nil, errors.New("Invalid Authorization header") } - - if len(parts) != 2 || strings.ToLower(parts[0]) != "basic" { - challenge.err = ErrPasswordRequired - return nil, &challenge - } - - text, err := base64.StdEncoding.DecodeString(parts[1]) - if err != nil { + + if res, _ := ac.htpasswd.AuthenticateUser(user, pass); !res { + challenge := challenge{ + realm: ac.realm, + } challenge.err = ErrInvalidCredential return nil, &challenge } - credential := strings.Split(string(text), ":") - if len(credential) != 2 { - challenge.err = ErrInvalidCredential - return nil, &challenge - } - - if res, _ := ac.htpasswd.AuthenticateUser(credential[0], credential[1]); !res { - challenge.err = ErrInvalidCredential - return nil, &challenge - } - - return auth.WithUser(ctx, auth.UserInfo{Name: credential[0]}), nil + return auth.WithUser(ctx, auth.UserInfo{Name: user}), nil } func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { diff --git a/docs/auth/basic/access_test.go b/docs/auth/basic/access_test.go index d82573b9..b731675e 100644 --- a/docs/auth/basic/access_test.go +++ b/docs/auth/basic/access_test.go @@ -14,8 +14,13 @@ import ( func TestBasicAccessController(t *testing.T) { testRealm := "The-Shire" - testUser := "bilbo" - testHtpasswdContent := "bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=" + testUsers := []string{"bilbo","frodo","MiShil","DeokMan"} + testPasswords := []string{"baggins","baggins","새주","공주님"} + testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= + frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W + MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 + DeokMan:공주님` + tempFile, err := ioutil.TempFile("", "htpasswd-test") if err != nil { @@ -36,7 +41,9 @@ 
func TestBasicAccessController(t *testing.T) { } tempFile.Close() - + + var userNumber = 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(nil, "http.request", r) authCtx, err := accessController.Authorized(ctx) @@ -55,8 +62,8 @@ func TestBasicAccessController(t *testing.T) { t.Fatal("basic accessController did not set auth.user context") } - if userInfo.Name != testUser { - t.Fatalf("expected user name %q, got %q", testUser, userInfo.Name) + if userInfo.Name != testUsers[userNumber] { + t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) } w.WriteHeader(http.StatusNoContent) @@ -79,22 +86,25 @@ func TestBasicAccessController(t *testing.T) { t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) } - req, _ = http.NewRequest("GET", server.URL, nil) + for i := 0; i < len(testUsers); i++ { + userNumber = i + req, _ = http.NewRequest("GET", server.URL, nil) + sekrit := testUsers[i]+":"+testPasswords[i] + credential := "Basic " + base64.StdEncoding.EncodeToString([]byte(sekrit)) - sekrit := "bilbo:baggins" - credential := "Basic " + base64.StdEncoding.EncodeToString([]byte(sekrit)) + req.Header.Set("Authorization", credential) + resp, err = client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() - req.Header.Set("Authorization", credential) - resp, err = client.Do(req) - - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected non-success response status: %v != %v", resp.StatusCode, http.StatusNoContent) + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i], credential) + } } + } diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go index 36eca347..69dae9d8 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/basic/htpasswd.go @@ -6,11 +6,14 @@ import ( "encoding/csv" "errors" "os" + "regexp" + "strings" + + "golang.org/x/crypto/bcrypt" ) -// ErrSHARequired - returned in error field of challenge when the htpasswd was not made using SHA1 algorithm. -// (SHA1 is considered obsolete but the alternative for htpasswd is MD5, or system crypt...) -var ErrSHARequired = errors.New("htpasswd file must use SHA (htpasswd -s)") +// AuthenticationFailureErr - a generic error message for authentication failure to be presented to agent. +var AuthenticationFailureErr = errors.New("Bad username or password") // HTPasswd - holds a path to a system .htpasswd file and the machinery to parse it. type HTPasswd struct { @@ -18,18 +21,57 @@ type HTPasswd struct { reader *csv.Reader } +// AuthType represents a particular hash function used in the htpasswd file. +type AuthType int +const ( + PlainText AuthType = iota + SHA1 + ApacheMD5 + BCrypt + Crypt +) + +// String returns a text representation of the AuthType +func (at AuthType) String() string { + switch(at) { + case PlainText: return "plaintext" + case SHA1: return "sha1" + case ApacheMD5: return "md5" + case BCrypt: return "bcrypt" + case Crypt: return "system crypt" + } + return "unknown" +} + + // NewHTPasswd - Create a new HTPasswd with the given path to .htpasswd file. 
func NewHTPasswd(htpath string) *HTPasswd { return &HTPasswd{path: htpath} } +var bcryptPrefixRegexp *regexp.Regexp = regexp.MustCompile(`^\$2[ab]?y\$`) + +// GetAuthCredentialType - Inspect an htpasswd file credential and guess the encryption algorithm used. +func GetAuthCredentialType(cred string) AuthType { + if strings.HasPrefix(cred, "{SHA}") { + return SHA1 + } + if strings.HasPrefix(cred, "$apr1$") { + return ApacheMD5 + } + if bcryptPrefixRegexp.MatchString(cred) { + return BCrypt + } + // There's just not a great way to distinguish between these next two... + if len(cred) == 13 { + return Crypt + } + return PlainText +} + // AuthenticateUser - Check a given user:password credential against the receiving HTPasswd's file. func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error) { - // Hash the credential. - sha := sha1.New() - sha.Write([]byte(pwd)) - hash := base64.StdEncoding.EncodeToString(sha.Sum(nil)) // Open the file. in, err := os.Open(htpasswd.path) @@ -43,12 +85,43 @@ func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error reader.Comment = '#' reader.TrimLeadingSpace = true for entry, readerr := reader.Read(); entry != nil || readerr != nil; entry, readerr = reader.Read() { + if readerr != nil { + return false, readerr + } + if len(entry) == 0 { + continue + } if entry[0] == user { - if len(entry[1]) < 6 || entry[1][0:5] != "{SHA}" { - return false, ErrSHARequired + credential := entry[1] + credType := GetAuthCredentialType(credential) + switch(credType) { + case SHA1: { + sha := sha1.New() + sha.Write([]byte(pwd)) + hash := base64.StdEncoding.EncodeToString(sha.Sum(nil)) + return entry[1][5:] == hash, nil + } + case ApacheMD5: { + return false, errors.New(ApacheMD5.String()+" htpasswd hash function not yet supported") + } + case BCrypt: { + err := bcrypt.CompareHashAndPassword([]byte(credential),[]byte(pwd)) + if err != nil { + return false, err + } + return true, nil + } + case Crypt: { + return false, errors.New(Crypt.String()+" htpasswd hash function not yet supported") + } + case PlainText: { + if pwd == credential { + return true, nil + } + return false, AuthenticationFailureErr + } } - return entry[1][5:] == hash, nil } } - return false, nil + return false, AuthenticationFailureErr } From 15bbde99c1cbb7ecd21e6ea310bd592a45fb2125 Mon Sep 17 00:00:00 2001 From: Dave Trombley Date: Thu, 4 Jun 2015 12:02:13 -0400 Subject: [PATCH 153/501] Fixed golint, gofmt warning advice. 
Signed-off-by: Dave Trombley --- docs/auth/basic/access.go | 4 +-- docs/auth/basic/access_test.go | 14 ++++---- docs/auth/basic/htpasswd.go | 64 +++++++++++++++++++++------------- 3 files changed, 47 insertions(+), 35 deletions(-) diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go index 81a22b40..0b3e2788 100644 --- a/docs/auth/basic/access.go +++ b/docs/auth/basic/access.go @@ -56,7 +56,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, err } - authHeader := req.Header.Get("Authorization") + authHeader := req.Header.Get("Authorization") if authHeader == "" { challenge := challenge{ realm: ac.realm, @@ -68,7 +68,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut if !ok { return nil, errors.New("Invalid Authorization header") } - + if res, _ := ac.htpasswd.AuthenticateUser(user, pass); !res { challenge := challenge{ realm: ac.realm, diff --git a/docs/auth/basic/access_test.go b/docs/auth/basic/access_test.go index b731675e..62699a63 100644 --- a/docs/auth/basic/access_test.go +++ b/docs/auth/basic/access_test.go @@ -14,13 +14,12 @@ import ( func TestBasicAccessController(t *testing.T) { testRealm := "The-Shire" - testUsers := []string{"bilbo","frodo","MiShil","DeokMan"} - testPasswords := []string{"baggins","baggins","새주","공주님"} + testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} + testPasswords := []string{"baggins", "baggins", "새주", "공주님"} testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 DeokMan:공주님` - tempFile, err := ioutil.TempFile("", "htpasswd-test") if err != nil { @@ -41,9 +40,9 @@ func TestBasicAccessController(t *testing.T) { } tempFile.Close() - + var userNumber = 0 - + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(nil, "http.request", r) authCtx, err := accessController.Authorized(ctx) @@ -89,12 +88,12 @@ func TestBasicAccessController(t *testing.T) { for i := 0; i < len(testUsers); i++ { userNumber = i req, _ = http.NewRequest("GET", server.URL, nil) - sekrit := testUsers[i]+":"+testPasswords[i] + sekrit := testUsers[i] + ":" + testPasswords[i] credential := "Basic " + base64.StdEncoding.EncodeToString([]byte(sekrit)) req.Header.Set("Authorization", credential) resp, err = client.Do(req) - + if err != nil { t.Fatalf("unexpected error during GET: %v", err) } @@ -105,6 +104,5 @@ func TestBasicAccessController(t *testing.T) { t.Fatalf("unexpected non-success response status: %v != %v for %s %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i], credential) } } - } diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go index 69dae9d8..89e4b749 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/basic/htpasswd.go @@ -8,12 +8,12 @@ import ( "os" "regexp" "strings" - + "golang.org/x/crypto/bcrypt" ) // AuthenticationFailureErr - a generic error message for authentication failure to be presented to agent. -var AuthenticationFailureErr = errors.New("Bad username or password") +var ErrAuthenticationFailure = errors.New("Bad username or password") // HTPasswd - holds a path to a system .htpasswd file and the machinery to parse it. type HTPasswd struct { @@ -22,34 +22,44 @@ type HTPasswd struct { } // AuthType represents a particular hash function used in the htpasswd file. 
-type AuthType int +type AuthType int + const ( - PlainText AuthType = iota + // PlainText - Plain-text password storage (htpasswd -p) + PlainText AuthType = iota + // SHA1 - sha hashed password storage (htpasswd -s) SHA1 + // ApacheMD5 - apr iterated md5 hashing (htpasswd -m) ApacheMD5 + // BCrypt - BCrypt adapative password hashing (htpasswd -B) BCrypt + // Crypt - System crypt() hashes. (htpasswd -d) Crypt ) // String returns a text representation of the AuthType func (at AuthType) String() string { - switch(at) { - case PlainText: return "plaintext" - case SHA1: return "sha1" - case ApacheMD5: return "md5" - case BCrypt: return "bcrypt" - case Crypt: return "system crypt" + switch at { + case PlainText: + return "plaintext" + case SHA1: + return "sha1" + case ApacheMD5: + return "md5" + case BCrypt: + return "bcrypt" + case Crypt: + return "system crypt" } return "unknown" } - // NewHTPasswd - Create a new HTPasswd with the given path to .htpasswd file. func NewHTPasswd(htpath string) *HTPasswd { return &HTPasswd{path: htpath} } -var bcryptPrefixRegexp *regexp.Regexp = regexp.MustCompile(`^\$2[ab]?y\$`) +var bcryptPrefixRegexp = regexp.MustCompile(`^\$2[ab]?y\$`) // GetAuthCredentialType - Inspect an htpasswd file credential and guess the encryption algorithm used. func GetAuthCredentialType(cred string) AuthType { @@ -72,7 +82,6 @@ func GetAuthCredentialType(cred string) AuthType { // AuthenticateUser - Check a given user:password credential against the receiving HTPasswd's file. func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error) { - // Open the file. in, err := os.Open(htpasswd.path) if err != nil { @@ -94,34 +103,39 @@ func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error if entry[0] == user { credential := entry[1] credType := GetAuthCredentialType(credential) - switch(credType) { - case SHA1: { + switch credType { + case SHA1: + { sha := sha1.New() sha.Write([]byte(pwd)) hash := base64.StdEncoding.EncodeToString(sha.Sum(nil)) return entry[1][5:] == hash, nil } - case ApacheMD5: { - return false, errors.New(ApacheMD5.String()+" htpasswd hash function not yet supported") + case ApacheMD5: + { + return false, errors.New(ApacheMD5.String() + " htpasswd hash function not yet supported") } - case BCrypt: { - err := bcrypt.CompareHashAndPassword([]byte(credential),[]byte(pwd)) + case BCrypt: + { + err := bcrypt.CompareHashAndPassword([]byte(credential), []byte(pwd)) if err != nil { return false, err } return true, nil } - case Crypt: { - return false, errors.New(Crypt.String()+" htpasswd hash function not yet supported") + case Crypt: + { + return false, errors.New(Crypt.String() + " htpasswd hash function not yet supported") } - case PlainText: { + case PlainText: + { if pwd == credential { return true, nil - } - return false, AuthenticationFailureErr + } + return false, ErrAuthenticationFailure } } } } - return false, AuthenticationFailureErr + return false, ErrAuthenticationFailure } From fe9ca88946c54cd14ea3481caaad1541dd2461cf Mon Sep 17 00:00:00 2001 From: Dave Trombley Date: Sat, 6 Jun 2015 01:37:32 -0400 Subject: [PATCH 154/501] Removed dashes from comments, unexported htpasswd struct Signed-off-by: Dave Trombley --- docs/auth/basic/access.go | 9 +++------ docs/auth/basic/htpasswd.go | 32 ++++++++++++++++---------------- 2 files changed, 19 insertions(+), 22 deletions(-) diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go index 0b3e2788..52b790d2 100644 --- a/docs/auth/basic/access.go +++ 
b/docs/auth/basic/access.go @@ -2,9 +2,6 @@ // user credential hash in an htpasswd formatted file in a configuration-determined // location. // -// The use of SHA hashes (htpasswd -s) is enforced since MD5 is insecure and simple -// system crypt() may be as well. -// // This authentication method MUST be used under TLS, as simple token-replay attack is possible. package basic @@ -20,7 +17,7 @@ import ( type accessController struct { realm string - htpasswd *HTPasswd + htpasswd *htpasswd } type challenge struct { @@ -30,9 +27,9 @@ type challenge struct { var _ auth.AccessController = &accessController{} var ( - // ErrPasswordRequired - returned when no auth token is given. + // ErrPasswordRequired Returned when no auth token is given. ErrPasswordRequired = errors.New("authorization credential required") - // ErrInvalidCredential - returned when the auth token does not authenticate correctly. + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. ErrInvalidCredential = errors.New("invalid authorization credential") ) diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go index 89e4b749..91d45e77 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/basic/htpasswd.go @@ -12,32 +12,32 @@ import ( "golang.org/x/crypto/bcrypt" ) -// AuthenticationFailureErr - a generic error message for authentication failure to be presented to agent. +// ErrAuthenticationFailure A generic error message for authentication failure to be presented to agent. var ErrAuthenticationFailure = errors.New("Bad username or password") -// HTPasswd - holds a path to a system .htpasswd file and the machinery to parse it. -type HTPasswd struct { +// htpasswd Holds a path to a system .htpasswd file and the machinery to parse it. +type htpasswd struct { path string reader *csv.Reader } -// AuthType represents a particular hash function used in the htpasswd file. +// AuthType Represents a particular hash function used in the htpasswd file. type AuthType int const ( - // PlainText - Plain-text password storage (htpasswd -p) + // PlainText Plain-text password storage (htpasswd -p) PlainText AuthType = iota - // SHA1 - sha hashed password storage (htpasswd -s) + // SHA1 sha hashed password storage (htpasswd -s) SHA1 - // ApacheMD5 - apr iterated md5 hashing (htpasswd -m) + // ApacheMD5 apr iterated md5 hashing (htpasswd -m) ApacheMD5 - // BCrypt - BCrypt adapative password hashing (htpasswd -B) + // BCrypt BCrypt adapative password hashing (htpasswd -B) BCrypt - // Crypt - System crypt() hashes. (htpasswd -d) + // Crypt System crypt() hashes. (htpasswd -d) Crypt ) -// String returns a text representation of the AuthType +// String Returns a text representation of the AuthType func (at AuthType) String() string { switch at { case PlainText: @@ -54,14 +54,14 @@ func (at AuthType) String() string { return "unknown" } -// NewHTPasswd - Create a new HTPasswd with the given path to .htpasswd file. -func NewHTPasswd(htpath string) *HTPasswd { - return &HTPasswd{path: htpath} +// NewHTPasswd Create a new HTPasswd with the given path to .htpasswd file. +func NewHTPasswd(htpath string) *htpasswd { + return &htpasswd{path: htpath} } var bcryptPrefixRegexp = regexp.MustCompile(`^\$2[ab]?y\$`) -// GetAuthCredentialType - Inspect an htpasswd file credential and guess the encryption algorithm used. +// GetAuthCredentialType Inspect an htpasswd file credential and guess the encryption algorithm used. 
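+// For example, a credential beginning with "{SHA}" (such as the test file's
+// "{SHA}5siv5c0SHx681xU6GiSx9ZQryqs=") resolves to SHA1, and a "$2y$05$..."
+// prefix resolves to BCrypt. (Editor's illustration, not in the patch.)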
 func GetAuthCredentialType(cred string) AuthType {
 	if strings.HasPrefix(cred, "{SHA}") {
 		return SHA1
@@ -79,8 +79,8 @@ func GetAuthCredentialType(cred string) AuthType {
 	return PlainText
 }
 
-// AuthenticateUser - Check a given user:password credential against the receiving HTPasswd's file.
-func (htpasswd *HTPasswd) AuthenticateUser(user string, pwd string) (bool, error) {
+// AuthenticateUser Check a given user:password credential against the receiving HTPasswd's file.
+func (htpasswd *htpasswd) AuthenticateUser(user string, pwd string) (bool, error) {
 
 	// Open the file.
 	in, err := os.Open(htpasswd.path)

From 350444568082cfc54a10c30e229b00aeeb0e8e1a Mon Sep 17 00:00:00 2001
From: Dave Trombley
Date: Sat, 6 Jun 2015 01:58:45 -0400
Subject: [PATCH 155/501] Unexported function to comply with golint

Signed-off-by: Dave Trombley
---
 docs/auth/basic/access.go   | 2 +-
 docs/auth/basic/htpasswd.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go
index 52b790d2..24f4009f 100644
--- a/docs/auth/basic/access.go
+++ b/docs/auth/basic/access.go
@@ -44,7 +44,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController,
 		return nil, fmt.Errorf(`"path" must be set for basic access controller`)
 	}
 
-	return &accessController{realm: realm.(string), htpasswd: NewHTPasswd(path.(string))}, nil
+	return &accessController{realm: realm.(string), htpasswd: newHTPasswd(path.(string))}, nil
 }
 
 func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) {
diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go
index 91d45e77..cc305ff1 100644
--- a/docs/auth/basic/htpasswd.go
+++ b/docs/auth/basic/htpasswd.go
@@ -55,7 +55,7 @@ func (at AuthType) String() string {
 }
 
 // NewHTPasswd Create a new HTPasswd with the given path to .htpasswd file.
-func NewHTPasswd(htpath string) *htpasswd {
+func newHTPasswd(htpath string) *htpasswd {
 	return &htpasswd{path: htpath}
 }

From 427c457801637e3d5b8a0e1f88cc1095a3f193c9 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 8 Jun 2015 18:56:48 -0700
Subject: [PATCH 156/501] Refactor Basic Authentication package

This change refactors the basic authentication implementation to better
follow Go coding standards. Many types are no longer exported. The
parser is now a separate function from the authentication code. The
standard functions (*http.Request).BasicAuth/SetBasicAuth are now used
where appropriate.

Signed-off-by: Stephen J Day
---
 docs/auth/basic/access.go      |  54 +++++----
 docs/auth/basic/access_test.go |  19 +--
 docs/auth/basic/htpasswd.go    | 203 +++++++++++++++++----------------
 3 files changed, 142 insertions(+), 134 deletions(-)

diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go
index 24f4009f..11e4ae5a 100644
--- a/docs/auth/basic/access.go
+++ b/docs/auth/basic/access.go
@@ -15,23 +15,20 @@ import (
 	"golang.org/x/net/context"
 )
 
+var (
+	// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
+	ErrInvalidCredential = errors.New("invalid authorization credential")
+
+	// ErrAuthenticationFailure is returned when authentication fails; it is presented to the agent.
+	ErrAuthenticationFailure = errors.New("authentication failure")
+)
+
 type accessController struct {
 	realm    string
 	htpasswd *htpasswd
 }
 
-type challenge struct {
-	realm string
-	err   error
-}
-
 var _ auth.AccessController = &accessController{}
-var (
-	// ErrPasswordRequired Returned when no auth token is given.
- ErrPasswordRequired = errors.New("authorization credential required") - // ErrInvalidCredential is returned when the auth token does not authenticate correctly. - ErrInvalidCredential = errors.New("invalid authorization credential") -) func newAccessController(options map[string]interface{}) (auth.AccessController, error) { realm, present := options["realm"] @@ -53,28 +50,29 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, err } - authHeader := req.Header.Get("Authorization") - if authHeader == "" { - challenge := challenge{ - realm: ac.realm, - } - return nil, &challenge - } - - user, pass, ok := req.BasicAuth() + username, password, ok := req.BasicAuth() if !ok { - return nil, errors.New("Invalid Authorization header") - } - - if res, _ := ac.htpasswd.AuthenticateUser(user, pass); !res { - challenge := challenge{ + return nil, &challenge{ realm: ac.realm, + err: ErrInvalidCredential, } - challenge.err = ErrInvalidCredential - return nil, &challenge } - return auth.WithUser(ctx, auth.UserInfo{Name: user}), nil + if err := ac.htpasswd.authenticateUser(ctx, username, password); err != nil { + ctxu.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + return nil, &challenge{ + realm: ac.realm, + err: ErrAuthenticationFailure, + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil +} + +// challenge implements the auth.Challenge interface. +type challenge struct { + realm string + err error } func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { diff --git a/docs/auth/basic/access_test.go b/docs/auth/basic/access_test.go index 62699a63..3bc99437 100644 --- a/docs/auth/basic/access_test.go +++ b/docs/auth/basic/access_test.go @@ -1,14 +1,13 @@ package basic import ( - "encoding/base64" "io/ioutil" "net/http" "net/http/httptest" "testing" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" ) func TestBasicAccessController(t *testing.T) { @@ -33,6 +32,7 @@ func TestBasicAccessController(t *testing.T) { "realm": testRealm, "path": tempFile.Name(), } + ctx := context.Background() accessController, err := newAccessController(options) if err != nil { @@ -44,7 +44,7 @@ func TestBasicAccessController(t *testing.T) { var userNumber = 0 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(nil, "http.request", r) + ctx := context.WithRequest(ctx, r) authCtx, err := accessController.Authorized(ctx) if err != nil { switch err := err.(type) { @@ -87,13 +87,14 @@ func TestBasicAccessController(t *testing.T) { for i := 0; i < len(testUsers); i++ { userNumber = i - req, _ = http.NewRequest("GET", server.URL, nil) - sekrit := testUsers[i] + ":" + testPasswords[i] - credential := "Basic " + base64.StdEncoding.EncodeToString([]byte(sekrit)) + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("error allocating new request: %v", err) + } + + req.SetBasicAuth(testUsers[i], testPasswords[i]) - req.Header.Set("Authorization", credential) resp, err = client.Do(req) - if err != nil { t.Fatalf("unexpected error during GET: %v", err) } @@ -101,7 +102,7 @@ func TestBasicAccessController(t *testing.T) { // Request should be authorized if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected non-success response status: %v != %v for %s %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i], credential) + 
t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) } } diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go index cc305ff1..f50805e7 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/basic/htpasswd.go @@ -1,54 +1,66 @@ package basic import ( + "bufio" "crypto/sha1" "encoding/base64" - "encoding/csv" - "errors" + "io" "os" "regexp" "strings" + "github.com/docker/distribution/context" "golang.org/x/crypto/bcrypt" ) -// ErrAuthenticationFailure A generic error message for authentication failure to be presented to agent. -var ErrAuthenticationFailure = errors.New("Bad username or password") - -// htpasswd Holds a path to a system .htpasswd file and the machinery to parse it. +// htpasswd holds a path to a system .htpasswd file and the machinery to parse it. type htpasswd struct { - path string - reader *csv.Reader + path string } -// AuthType Represents a particular hash function used in the htpasswd file. -type AuthType int +// authType represents a particular hash function used in the htpasswd file. +type authType int const ( - // PlainText Plain-text password storage (htpasswd -p) - PlainText AuthType = iota - // SHA1 sha hashed password storage (htpasswd -s) - SHA1 - // ApacheMD5 apr iterated md5 hashing (htpasswd -m) - ApacheMD5 - // BCrypt BCrypt adapative password hashing (htpasswd -B) - BCrypt - // Crypt System crypt() hashes. (htpasswd -d) - Crypt + authTypePlainText authType = iota // Plain-text password storage (htpasswd -p) + authTypeSHA1 // sha hashed password storage (htpasswd -s) + authTypeApacheMD5 // apr iterated md5 hashing (htpasswd -m) + authTypeBCrypt // BCrypt adapative password hashing (htpasswd -B) + authTypeCrypt // System crypt() hashes. (htpasswd -d) ) +var bcryptPrefixRegexp = regexp.MustCompile(`^\$2[ab]?y\$`) + +// detectAuthCredentialType inspects the credential and resolves the encryption scheme. +func detectAuthCredentialType(cred string) authType { + if strings.HasPrefix(cred, "{SHA}") { + return authTypeSHA1 + } + if strings.HasPrefix(cred, "$apr1$") { + return authTypeApacheMD5 + } + if bcryptPrefixRegexp.MatchString(cred) { + return authTypeBCrypt + } + // There's just not a great way to distinguish between these next two... + if len(cred) == 13 { + return authTypeCrypt + } + return authTypePlainText +} + // String Returns a text representation of the AuthType -func (at AuthType) String() string { +func (at authType) String() string { switch at { - case PlainText: + case authTypePlainText: return "plaintext" - case SHA1: + case authTypeSHA1: return "sha1" - case ApacheMD5: + case authTypeApacheMD5: return "md5" - case BCrypt: + case authTypeBCrypt: return "bcrypt" - case Crypt: + case authTypeCrypt: return "system crypt" } return "unknown" @@ -59,83 +71,80 @@ func newHTPasswd(htpath string) *htpasswd { return &htpasswd{path: htpath} } -var bcryptPrefixRegexp = regexp.MustCompile(`^\$2[ab]?y\$`) - -// GetAuthCredentialType Inspect an htpasswd file credential and guess the encryption algorithm used. -func GetAuthCredentialType(cred string) AuthType { - if strings.HasPrefix(cred, "{SHA}") { - return SHA1 - } - if strings.HasPrefix(cred, "$apr1$") { - return ApacheMD5 - } - if bcryptPrefixRegexp.MatchString(cred) { - return BCrypt - } - // There's just not a great way to distinguish between these next two... 
- if len(cred) == 13 { - return Crypt - } - return PlainText -} - -// AuthenticateUser Check a given user:password credential against the receiving HTPasswd's file. -func (htpasswd *htpasswd) AuthenticateUser(user string, pwd string) (bool, error) { - +// AuthenticateUser checks a given user:password credential against the +// receiving HTPasswd's file. If the check passes, nil is returned. Note that +// this parses the htpasswd file on each request so ensure that updates are +// available. +func (htpasswd *htpasswd) authenticateUser(ctx context.Context, username string, password string) error { // Open the file. in, err := os.Open(htpasswd.path) if err != nil { - return false, err + return err + } + defer in.Close() + + for _, entry := range parseHTPasswd(ctx, in) { + if entry.username != username { + continue // wrong entry + } + + switch t := detectAuthCredentialType(entry.password); t { + case authTypeSHA1: + sha := sha1.New() + sha.Write([]byte(password)) + hash := base64.StdEncoding.EncodeToString(sha.Sum(nil)) + + if entry.password[5:] != hash { + return ErrAuthenticationFailure + } + + return nil + case authTypeBCrypt: + err := bcrypt.CompareHashAndPassword([]byte(entry.password), []byte(password)) + if err != nil { + return ErrAuthenticationFailure + } + + return nil + case authTypePlainText: + if password != entry.password { + return ErrAuthenticationFailure + } + + return nil + default: + context.GetLogger(ctx).Errorf("unsupported basic authentication type: %v", t) + } } - // Parse the contents of the standard .htpasswd until we hit the end or find a match. - reader := csv.NewReader(in) - reader.Comma = ':' - reader.Comment = '#' - reader.TrimLeadingSpace = true - for entry, readerr := reader.Read(); entry != nil || readerr != nil; entry, readerr = reader.Read() { - if readerr != nil { - return false, readerr - } - if len(entry) == 0 { + return ErrAuthenticationFailure +} + +// htpasswdEntry represents a line in an htpasswd file. +type htpasswdEntry struct { + username string // username, plain text + password string // stores hashed passwd +} + +// parseHTPasswd parses the contents of htpasswd. Bad entries are skipped and +// logged, so this may return empty. This will read all the entries in the +// file, whether or not they are needed. 
+func parseHTPasswd(ctx context.Context, rd io.Reader) []htpasswdEntry {
+	entries := []htpasswdEntry{}
+	scanner := bufio.NewScanner(rd)
+	for scanner.Scan() {
+		t := strings.TrimSpace(scanner.Text())
+		i := strings.Index(t, ":")
+		if i < 0 || i >= len(t) {
+			context.GetLogger(ctx).Errorf("bad entry in htpasswd: %q", t)
 			continue
 		}
-		if entry[0] == user {
-			credential := entry[1]
-			credType := GetAuthCredentialType(credential)
-			switch credType {
-			case SHA1:
-				{
-					sha := sha1.New()
-					sha.Write([]byte(pwd))
-					hash := base64.StdEncoding.EncodeToString(sha.Sum(nil))
-					return entry[1][5:] == hash, nil
-				}
-			case ApacheMD5:
-				{
-					return false, errors.New(ApacheMD5.String() + " htpasswd hash function not yet supported")
-				}
-			case BCrypt:
-				{
-					err := bcrypt.CompareHashAndPassword([]byte(credential), []byte(pwd))
-					if err != nil {
-						return false, err
-					}
-					return true, nil
-				}
-			case Crypt:
-				{
-					return false, errors.New(Crypt.String() + " htpasswd hash function not yet supported")
-				}
-			case PlainText:
-				{
-					if pwd == credential {
-						return true, nil
-					}
-					return false, ErrAuthenticationFailure
-				}
-			}
-		}
+
+		entries = append(entries, htpasswdEntry{
+			username: t[:i],
+			password: t[i+1:],
+		})
 	}
-	return false, ErrAuthenticationFailure
+
+	return entries
 }

From 14f3b07db099d41b47e956ba8509a71f2f022012 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 10 Jun 2015 19:29:27 -0700
Subject: [PATCH 157/501] Harden basic auth implementation

After consideration, the basic authentication implementation has been
simplified to support only bcrypt entries in an htpasswd file. This
greatly increases the security of the implementation by reducing the
possibility of timing attacks and other problems that stem from trying
to detect the password hash type.

Also, the htpasswd file is only parsed at startup, ensuring that the
file can be edited and not affect ongoing requests. Newly added
passwords take effect on restart. As a result, password hash entries
are now stored in a map.

Test cases have been modified accordingly.
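As a minimal, self-contained sketch of the lookup-and-compare flow this
change adopts (assuming golang.org/x/crypto/bcrypt, which the diff below
also uses; the verify helper, main function, and sample entries here are
illustrative only, not taken from the patch):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/bcrypt"
    )

    // verify looks a user up in a map of bcrypt entries and compares the
    // supplied password against the stored hash. Unknown users still cost
    // one bcrypt comparison, so the unknown-user and wrong-password paths
    // take roughly the same time.
    func verify(entries map[string][]byte, username, password string) bool {
        hash, ok := entries[username]
        if !ok {
            // timing attack paranoia: compare against an empty hash anyway
            bcrypt.CompareHashAndPassword([]byte{}, []byte(password))
            return false
        }
        return bcrypt.CompareHashAndPassword(hash, []byte(password)) == nil
    }

    func main() {
        // hash the password once up front, mirroring the parse-at-startup map
        hash, err := bcrypt.GenerateFromPassword([]byte("baggins"), bcrypt.DefaultCost)
        if err != nil {
            panic(err)
        }
        entries := map[string][]byte{"frodo": hash}

        fmt.Println(verify(entries, "frodo", "baggins")) // true
        fmt.Println(verify(entries, "frodo", "wrong"))   // false
        fmt.Println(verify(entries, "sam", "baggins"))   // false: unknown user
    }

Because every stored entry is a bcrypt hash, every credential check runs
the same code path; there is no hash-type detection step whose timing or
failure mode could leak information.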
Signed-off-by: Stephen J Day --- docs/auth/basic/access.go | 16 ++- docs/auth/basic/access_test.go | 20 +++- docs/auth/basic/htpasswd.go | 166 +++++++++---------------------- docs/auth/basic/htpasswd_test.go | 85 ++++++++++++++++ docs/handlers/app.go | 1 + 5 files changed, 164 insertions(+), 124 deletions(-) create mode 100644 docs/auth/basic/htpasswd_test.go diff --git a/docs/auth/basic/access.go b/docs/auth/basic/access.go index 11e4ae5a..f7d5e79b 100644 --- a/docs/auth/basic/access.go +++ b/docs/auth/basic/access.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "net/http" + "os" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" @@ -41,7 +42,18 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, return nil, fmt.Errorf(`"path" must be set for basic access controller`) } - return &accessController{realm: realm.(string), htpasswd: newHTPasswd(path.(string))}, nil + f, err := os.Open(path.(string)) + if err != nil { + return nil, err + } + defer f.Close() + + h, err := newHTPasswd(f) + if err != nil { + return nil, err + } + + return &accessController{realm: realm.(string), htpasswd: h}, nil } func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { @@ -58,7 +70,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut } } - if err := ac.htpasswd.authenticateUser(ctx, username, password); err != nil { + if err := ac.htpasswd.authenticateUser(username, password); err != nil { ctxu.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) return nil, &challenge{ realm: ac.realm, diff --git a/docs/auth/basic/access_test.go b/docs/auth/basic/access_test.go index 3bc99437..1976b32e 100644 --- a/docs/auth/basic/access_test.go +++ b/docs/auth/basic/access_test.go @@ -11,7 +11,6 @@ import ( ) func TestBasicAccessController(t *testing.T) { - testRealm := "The-Shire" testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} testPasswords := []string{"baggins", "baggins", "새주", "공주님"} @@ -85,6 +84,11 @@ func TestBasicAccessController(t *testing.T) { t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) } + nonbcrypt := map[string]struct{}{ + "bilbo": struct{}{}, + "DeokMan": struct{}{}, + } + for i := 0; i < len(testUsers); i++ { userNumber = i req, err := http.NewRequest("GET", server.URL, nil) @@ -100,9 +104,17 @@ func TestBasicAccessController(t *testing.T) { } defer resp.Body.Close() - // Request should be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) + if _, ok := nonbcrypt[testUsers[i]]; ok { + // these are not allowed. 
+ // Request should be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) + } + } else { + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) + } } } diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/basic/htpasswd.go index f50805e7..dd9bb1ac 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/basic/htpasswd.go @@ -2,149 +2,79 @@ package basic import ( "bufio" - "crypto/sha1" - "encoding/base64" + "fmt" "io" - "os" - "regexp" "strings" - "github.com/docker/distribution/context" "golang.org/x/crypto/bcrypt" ) -// htpasswd holds a path to a system .htpasswd file and the machinery to parse it. +// htpasswd holds a path to a system .htpasswd file and the machinery to parse +// it. Only bcrypt hash entries are supported. type htpasswd struct { - path string + entries map[string][]byte // maps username to password byte slice. } -// authType represents a particular hash function used in the htpasswd file. -type authType int - -const ( - authTypePlainText authType = iota // Plain-text password storage (htpasswd -p) - authTypeSHA1 // sha hashed password storage (htpasswd -s) - authTypeApacheMD5 // apr iterated md5 hashing (htpasswd -m) - authTypeBCrypt // BCrypt adapative password hashing (htpasswd -B) - authTypeCrypt // System crypt() hashes. (htpasswd -d) -) - -var bcryptPrefixRegexp = regexp.MustCompile(`^\$2[ab]?y\$`) - -// detectAuthCredentialType inspects the credential and resolves the encryption scheme. -func detectAuthCredentialType(cred string) authType { - if strings.HasPrefix(cred, "{SHA}") { - return authTypeSHA1 +// newHTPasswd parses the reader and returns an htpasswd or an error. +func newHTPasswd(rd io.Reader) (*htpasswd, error) { + entries, err := parseHTPasswd(rd) + if err != nil { + return nil, err } - if strings.HasPrefix(cred, "$apr1$") { - return authTypeApacheMD5 - } - if bcryptPrefixRegexp.MatchString(cred) { - return authTypeBCrypt - } - // There's just not a great way to distinguish between these next two... - if len(cred) == 13 { - return authTypeCrypt - } - return authTypePlainText -} -// String Returns a text representation of the AuthType -func (at authType) String() string { - switch at { - case authTypePlainText: - return "plaintext" - case authTypeSHA1: - return "sha1" - case authTypeApacheMD5: - return "md5" - case authTypeBCrypt: - return "bcrypt" - case authTypeCrypt: - return "system crypt" - } - return "unknown" -} - -// NewHTPasswd Create a new HTPasswd with the given path to .htpasswd file. -func newHTPasswd(htpath string) *htpasswd { - return &htpasswd{path: htpath} + return &htpasswd{entries: entries}, nil } // AuthenticateUser checks a given user:password credential against the -// receiving HTPasswd's file. If the check passes, nil is returned. Note that -// this parses the htpasswd file on each request so ensure that updates are -// available. -func (htpasswd *htpasswd) authenticateUser(ctx context.Context, username string, password string) error { - // Open the file. - in, err := os.Open(htpasswd.path) +// receiving HTPasswd's file. If the check passes, nil is returned. 
+func (htpasswd *htpasswd) authenticateUser(username string, password string) error { + credentials, ok := htpasswd.entries[username] + if !ok { + // timing attack paranoia + bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) + + return ErrAuthenticationFailure + } + + err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) if err != nil { - return err - } - defer in.Close() - - for _, entry := range parseHTPasswd(ctx, in) { - if entry.username != username { - continue // wrong entry - } - - switch t := detectAuthCredentialType(entry.password); t { - case authTypeSHA1: - sha := sha1.New() - sha.Write([]byte(password)) - hash := base64.StdEncoding.EncodeToString(sha.Sum(nil)) - - if entry.password[5:] != hash { - return ErrAuthenticationFailure - } - - return nil - case authTypeBCrypt: - err := bcrypt.CompareHashAndPassword([]byte(entry.password), []byte(password)) - if err != nil { - return ErrAuthenticationFailure - } - - return nil - case authTypePlainText: - if password != entry.password { - return ErrAuthenticationFailure - } - - return nil - default: - context.GetLogger(ctx).Errorf("unsupported basic authentication type: %v", t) - } + return ErrAuthenticationFailure } - return ErrAuthenticationFailure + return nil } -// htpasswdEntry represents a line in an htpasswd file. -type htpasswdEntry struct { - username string // username, plain text - password string // stores hashed passwd -} - -// parseHTPasswd parses the contents of htpasswd. Bad entries are skipped and -// logged, so this may return empty. This will read all the entries in the -// file, whether or not they are needed. -func parseHTPasswd(ctx context.Context, rd io.Reader) []htpasswdEntry { - entries := []htpasswdEntry{} +// parseHTPasswd parses the contents of htpasswd. This will read all the +// entries in the file, whether or not they are needed. An error is returned +// if an syntax errors are encountered or if the reader fails. +func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { + entries := map[string][]byte{} scanner := bufio.NewScanner(rd) + var line int for scanner.Scan() { + line++ // 1-based line numbering t := strings.TrimSpace(scanner.Text()) - i := strings.Index(t, ":") - if i < 0 || i >= len(t) { - context.GetLogger(ctx).Errorf("bad entry in htpasswd: %q", t) + + if len(t) < 1 { continue } - entries = append(entries, htpasswdEntry{ - username: t[:i], - password: t[i+1:], - }) + // lines that *begin* with a '#' are considered comments + if t[0] == '#' { + continue + } + + i := strings.Index(t, ":") + if i < 0 || i >= len(t) { + return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) + } + + entries[t[:i]] = []byte(t[i+1:]) } - return entries + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil } diff --git a/docs/auth/basic/htpasswd_test.go b/docs/auth/basic/htpasswd_test.go new file mode 100644 index 00000000..5cc86126 --- /dev/null +++ b/docs/auth/basic/htpasswd_test.go @@ -0,0 +1,85 @@ +package basic + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +func TestParseHTPasswd(t *testing.T) { + + for _, tc := range []struct { + desc string + input string + err error + entries map[string][]byte + }{ + { + desc: "basic example", + input: ` +# This is a comment in a basic example. 
+bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= +frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W +MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 +DeokMan:공주님 +`, + entries: map[string][]byte{ + "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), + "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), + "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), + "DeokMan": []byte("공주님"), + }, + }, + { + desc: "ensures comments are filtered", + input: ` +# asdf:asdf +`, + }, + { + desc: "ensure midline hash is not comment", + input: ` +asdf:as#df +`, + entries: map[string][]byte{ + "asdf": []byte("as#df"), + }, + }, + { + desc: "ensure midline hash is not comment", + input: ` +# A valid comment +valid:entry +asdf +`, + err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), + }, + } { + + entries, err := parseHTPasswd(strings.NewReader(tc.input)) + if err != tc.err { + if tc.err == nil { + t.Fatalf("%s: unexpected error: %v", tc.desc, err) + } else { + if err.Error() != tc.err.Error() { // use string equality here. + t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) + } + } + } + + if tc.err != nil { + continue // don't test output + } + + // allow empty and nil to be equal + if tc.entries == nil { + tc.entries = map[string][]byte{} + } + + if !reflect.DeepEqual(entries, tc.entries) { + t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) + } + } + +} diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 2f37aa53..08c1c004 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -147,6 +147,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) } app.accessController = accessController + ctxu.GetLogger(app).Debugf("configured %q access controller", authType) } return app From e667be389a1700c6c98be72405879d755a7003f4 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 10 Jun 2015 19:40:05 -0700 Subject: [PATCH 158/501] Rename the basic access controller to htpasswd Signed-off-by: Stephen J Day --- docs/auth/{basic => htpasswd}/access.go | 10 +++++----- docs/auth/{basic => htpasswd}/access_test.go | 2 +- docs/auth/{basic => htpasswd}/htpasswd.go | 2 +- docs/auth/{basic => htpasswd}/htpasswd_test.go | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) rename docs/auth/{basic => htpasswd}/access.go (88%) rename docs/auth/{basic => htpasswd}/access_test.go (99%) rename docs/auth/{basic => htpasswd}/htpasswd.go (99%) rename docs/auth/{basic => htpasswd}/htpasswd_test.go (99%) diff --git a/docs/auth/basic/access.go b/docs/auth/htpasswd/access.go similarity index 88% rename from docs/auth/basic/access.go rename to docs/auth/htpasswd/access.go index f7d5e79b..5425b1da 100644 --- a/docs/auth/basic/access.go +++ b/docs/auth/htpasswd/access.go @@ -1,9 +1,9 @@ -// Package basic provides a simple authentication scheme that checks for the +// Package htpasswd provides a simple authentication scheme that checks for the // user credential hash in an htpasswd formatted file in a configuration-determined // location. // // This authentication method MUST be used under TLS, as simple token-replay attack is possible. 
-package basic +package htpasswd import ( "errors" @@ -34,12 +34,12 @@ var _ auth.AccessController = &accessController{} func newAccessController(options map[string]interface{}) (auth.AccessController, error) { realm, present := options["realm"] if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for basic access controller`) + return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) } path, present := options["path"] if _, ok := path.(string); !present || !ok { - return nil, fmt.Errorf(`"path" must be set for basic access controller`) + return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) } f, err := os.Open(path.(string)) @@ -98,5 +98,5 @@ func (ch *challenge) Error() string { } func init() { - auth.Register("basic", auth.InitFunc(newAccessController)) + auth.Register("htpasswd", auth.InitFunc(newAccessController)) } diff --git a/docs/auth/basic/access_test.go b/docs/auth/htpasswd/access_test.go similarity index 99% rename from docs/auth/basic/access_test.go rename to docs/auth/htpasswd/access_test.go index 1976b32e..5cb2d7c9 100644 --- a/docs/auth/basic/access_test.go +++ b/docs/auth/htpasswd/access_test.go @@ -1,4 +1,4 @@ -package basic +package htpasswd import ( "io/ioutil" diff --git a/docs/auth/basic/htpasswd.go b/docs/auth/htpasswd/htpasswd.go similarity index 99% rename from docs/auth/basic/htpasswd.go rename to docs/auth/htpasswd/htpasswd.go index dd9bb1ac..494ad0a7 100644 --- a/docs/auth/basic/htpasswd.go +++ b/docs/auth/htpasswd/htpasswd.go @@ -1,4 +1,4 @@ -package basic +package htpasswd import ( "bufio" diff --git a/docs/auth/basic/htpasswd_test.go b/docs/auth/htpasswd/htpasswd_test.go similarity index 99% rename from docs/auth/basic/htpasswd_test.go rename to docs/auth/htpasswd/htpasswd_test.go index 5cc86126..309c359a 100644 --- a/docs/auth/basic/htpasswd_test.go +++ b/docs/auth/htpasswd/htpasswd_test.go @@ -1,4 +1,4 @@ -package basic +package htpasswd import ( "fmt" From 280b9c50ac0c4bd83e26a4ce8d79783aeb38bf39 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Wed, 10 Jun 2015 20:54:24 -0700 Subject: [PATCH 159/501] Saner default data location Signed-off-by: Olivier Gambier --- docs/storage/driver/filesystem/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index 82960314..d5d8708c 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -16,7 +16,7 @@ import ( ) const driverName = "filesystem" -const defaultRootDirectory = "/tmp/registry/storage" +const defaultRootDirectory = "/var/lib/registry" func init() { factory.Register(driverName, &filesystemDriverFactory{}) From f6ee0f46af41827082ee63ab261b6ddcbe4aa807 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Jun 2015 17:06:35 -0700 Subject: [PATCH 160/501] Minor formatting fixes related to htpasswd auth Signed-off-by: Stephen J Day --- docs/auth/htpasswd/access_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/auth/htpasswd/access_test.go b/docs/auth/htpasswd/access_test.go index 5cb2d7c9..ea0de425 100644 --- a/docs/auth/htpasswd/access_test.go +++ b/docs/auth/htpasswd/access_test.go @@ -85,8 +85,8 @@ func TestBasicAccessController(t *testing.T) { } nonbcrypt := map[string]struct{}{ - "bilbo": struct{}{}, - "DeokMan": struct{}{}, + "bilbo": {}, + "DeokMan": {}, } for i := 0; i < len(testUsers); i++ { From 
56349665b758d500eac09798c369b546125b439b Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 3 Jun 2015 06:52:39 -0700 Subject: [PATCH 161/501] Round 4 Signed-off-by: Doug Davis --- docs/api/errcode/errors.go | 242 +++++++++++++++++--------------- docs/api/errcode/errors_test.go | 12 +- docs/api/errcode/register.go | 86 ++++++++++++ docs/api/v2/errors.go | 30 ++-- docs/client/blob_writer_test.go | 5 +- docs/handlers/api_test.go | 12 +- docs/handlers/app.go | 33 +++-- docs/handlers/app_test.go | 8 +- docs/handlers/blob.go | 10 +- docs/handlers/blobupload.go | 50 +++---- docs/handlers/helpers.go | 11 +- docs/handlers/images.go | 26 ++-- docs/handlers/tags.go | 6 +- 13 files changed, 334 insertions(+), 197 deletions(-) create mode 100644 docs/api/errcode/register.go diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index 4285dedc..cf186cfb 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -1,106 +1,29 @@ package errcode import ( + "encoding/json" "fmt" - "net/http" "strings" - "sync" ) +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + // ErrorCode represents the error type. The errors are serialized via strings // and the integer format may change and should *never* be exported. type ErrorCode int -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. 
- HTTPStatusCode int +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec } -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -// ErrorCodeUnknown is a generic error that can be used as a last -// resort if there is no situation-specific error message that can be used -var ErrorCodeUnknown = Register("registry.api.errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, -}) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - code := ErrorCode(nextCode) - - descriptor.Code = code - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %s is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %d is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return code -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - return groupToDescriptors[name] +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + return ec.Descriptor().Value } // Descriptor returns the descriptor for the error code. @@ -143,12 +66,30 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { return nil } +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + if err, ok := detail.(error); ok { + detail = err.Error() + } + + return Error{ + Code: ec, + Detail: detail, + } +} + // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { Code ErrorCode `json:"code"` Detail interface{} `json:"detail,omitempty"` } +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + // Error returns a human readable representation of the error. func (e Error) Error() string { return fmt.Sprintf("%s: %s", @@ -161,30 +102,43 @@ func (e Error) Message() string { return e.Code.Message() } +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. 
This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable decription of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + // Errors provides the envelope for multiple errors and a few sugar methods // for use within the application. -type Errors []Error - -// NewError creates a new Error struct based on the passed-in info -func NewError(code ErrorCode, details ...interface{}) Error { - if len(details) > 1 { - panic("please specify zero or one detail items for this error") - } - - var detail interface{} - if len(details) > 0 { - detail = details[0] - } - - if err, ok := detail.(error); ok { - detail = err.Error() - } - - return Error{ - Code: code, - Detail: detail, - } -} +type Errors []error func (errs Errors) Error() string { switch len(errs) { @@ -205,3 +159,67 @@ func (errs Errors) Error() string { func (errs Errors) Len() int { return len(errs) } + +// jsonError extends Error with 'Message' so that we can include the +// error text, just in case the receiver of the JSON doesn't have this +// particular ErrorCode registered +type jsonError struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs []jsonError + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + tmpErrs = append(tmpErrs, jsonError{ + Code: err.Code, + Message: err.Message(), + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs []jsonError + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs { + if daErr.Detail == nil { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go index aaf0d73b..d89c0253 100644 --- a/docs/api/errcode/errors_test.go +++ b/docs/api/errcode/errors_test.go @@ -79,8 +79,8 @@ var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ func TestErrorsManagement(t *testing.T) { var errs Errors - errs = append(errs, NewError(ErrorCodeTest1)) - errs = append(errs, NewError(ErrorCodeTest2, + errs = append(errs, ErrorCodeTest1) + errs = append(errs, ErrorCodeTest2.WithDetail( map[string]interface{}{"digest": 
"sometestblobsumdoesntmatter"})) p, err := json.Marshal(errs) @@ -89,10 +89,10 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("error marashaling errors: %v", err) } - expectedJSON := "[{\"code\":\"TEST1\"},{\"code\":\"TEST2\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]" + expectedJSON := "[{\"code\":\"TEST1\",\"message\":\"test error 1\"},{\"code\":\"TEST2\",\"message\":\"test error 2\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]" if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) + t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) } // Now test the reverse @@ -106,8 +106,8 @@ func TestErrorsManagement(t *testing.T) { } // Test again with a single value this time - errs = Errors{NewError(ErrorCodeUnknown)} - expectedJSON = "[{\"code\":\"UNKNOWN\"}]" + errs = Errors{ErrorCodeUnknown} + expectedJSON = "[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]" p, err = json.Marshal(errs) if err != nil { diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go new file mode 100644 index 00000000..42f911b3 --- /dev/null +++ b/docs/api/errcode/register.go @@ -0,0 +1,86 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +// ErrorCodeUnknown is a generic error that can be used as a last +// resort if there is no situation-specific error message that can be used +var ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + 
for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index c12cbc1c..14684560 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -6,9 +6,11 @@ import ( "github.com/docker/distribution/registry/api/errcode" ) +const errGroup = "registry.api.v2" + var ( // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeUnsupported = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "UNSUPPORTED", Message: "The operation is unsupported.", Description: `The operation was unsupported due to a missing @@ -16,7 +18,7 @@ var ( }) // ErrorCodeUnauthorized is returned if a request is not authorized. - ErrorCodeUnauthorized = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeUnauthorized = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "UNAUTHORIZED", Message: "access to the requested resource is not authorized", Description: `The access controller denied access for the operation on @@ -27,7 +29,7 @@ var ( // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "DIGEST_INVALID", Message: "provided digest did not match uploaded content", Description: `When a blob is uploaded, the registry will check that @@ -39,7 +41,7 @@ var ( }) // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "SIZE_INVALID", Message: "provided length did not match content length", Description: `When a layer is uploaded, the provided size will be @@ -50,7 +52,7 @@ var ( // ErrorCodeNameInvalid is returned when the name in the manifest does not // match the provided name. - ErrorCodeNameInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NAME_INVALID", Message: "invalid repository name", Description: `Invalid repository name encountered either during @@ -60,7 +62,7 @@ var ( // ErrorCodeTagInvalid is returned when the tag in the manifest does not // match the provided tag. - ErrorCodeTagInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "TAG_INVALID", Message: "manifest tag did not match URI", Description: `During a manifest upload, if the tag in the manifest @@ -69,7 +71,7 @@ var ( }) // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NAME_UNKNOWN", Message: "repository name not known to registry", Description: `This is returned if the name used during an operation is @@ -78,7 +80,7 @@ var ( }) // ErrorCodeManifestUnknown returned when image manifest is unknown. 
- ErrorCodeManifestUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNKNOWN", Message: "manifest unknown", Description: `This error is returned when the manifest, identified by @@ -89,7 +91,7 @@ var ( // ErrorCodeManifestInvalid returned when an image manifest is invalid, // typically during a PUT operation. This error encompasses all errors // encountered during manifest validation that aren't signature errors. - ErrorCodeManifestInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_INVALID", Message: "manifest invalid", Description: `During upload, manifests undergo several checks ensuring @@ -101,7 +103,7 @@ var ( // ErrorCodeManifestUnverified is returned when the manifest fails // signature verfication. - ErrorCodeManifestUnverified = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature @@ -111,7 +113,7 @@ var ( // ErrorCodeManifestBlobUnknown is returned when a manifest blob is // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a manifest blob is @@ -122,7 +124,7 @@ var ( // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a blob is unknown to the @@ -133,7 +135,7 @@ var ( }) // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_UNKNOWN", Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never @@ -142,7 +144,7 @@ var ( }) // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. 
- ErrorCodeBlobUploadInvalid = errcode.Register("registry.api.v2", errcode.ErrorDescriptor{ + ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_INVALID", Message: "blob upload invalid", Description: `The blob upload encountered an error and can no diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 74545b06..eeb9f53d 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -164,7 +164,10 @@ func TestUploadReadFrom(t *testing.T) { } else if len(uploadErr) != 1 { t.Fatalf("Unexpected number of errors: %d, expected 1", len(uploadErr)) } else { - v2Err := uploadErr[0] + v2Err, ok := uploadErr[0].(errcode.Error) + if !ok { + t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) + } if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 146fcf4c..9952d68e 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -780,11 +780,15 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error counts[code] = 0 } - for _, err := range errs { - if _, ok := expected[err.Code]; !ok { - t.Fatalf("unexpected error code %v encountered during %s: %s ", err.Code, msg, string(p)) + for _, e := range errs { + err, ok := e.(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", e) } - counts[err.Code]++ + if _, ok := expected[err.ErrorCode()]; !ok { + t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) + } + counts[err.ErrorCode()]++ } // Ensure that counts of expected errors were all non-zero diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 0ef7d4ca..83b231af 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -346,9 +346,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - context.Errors = append(context.Errors, errcode.NewError(v2.ErrorCodeNameUnknown, err)) + context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) case distribution.ErrRepositoryNameInvalid: - context.Errors = append(context.Errors, errcode.NewError(v2.ErrorCodeNameInvalid, err)) + context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) } serveJSON(w, context.Errors) @@ -363,7 +363,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) - context.Errors = append(context.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) serveJSON(w, context.Errors) return @@ -383,10 +383,25 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } func (app *App) logError(context context.Context, errors errcode.Errors) { - for _, e := range errors { - c := ctxu.WithValue(context, "err.code", e.Code) - c = ctxu.WithValue(c, "err.message", e.Code.Message()) - c = ctxu.WithValue(c, "err.detail", e.Detail) + for _, e1 := range errors { + var c ctxu.Context + + switch e1.(type) { + case errcode.Error: + e, _ := e1.(errcode.Error) + c = ctxu.WithValue(context, "err.code", e.Code) + c = ctxu.WithValue(c, "err.message", 
e.Code.Message()) + c = ctxu.WithValue(c, "err.detail", e.Detail) + case errcode.ErrorCode: + e, _ := e1.(errcode.ErrorCode) + c = ctxu.WithValue(context, "err.code", e) + c = ctxu.WithValue(c, "err.message", e.Message()) + default: + // just normal go 'error' + c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) + c = ctxu.WithValue(c, "err.message", e1.Error()) + } + c = ctxu.WithLogger(c, ctxu.GetLogger(c, "err.code", "err.message", @@ -441,7 +456,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // proceed. var errs errcode.Errors - errs = append(errs, errcode.NewError(v2.ErrorCodeUnauthorized)) + errs = append(errs, v2.ErrorCodeUnauthorized) serveJSON(w, errs) return fmt.Errorf("forbidden: no repository name") @@ -465,7 +480,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont err.ServeHTTP(w, r) var errs errcode.Errors - errs = append(errs, errcode.NewError(v2.ErrorCodeUnauthorized, accessRecords)) + errs = append(errs, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)) serveJSON(w, errs) default: // This condition is a potential security problem either in diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index d98ae400..98ecaefd 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -201,8 +201,12 @@ func TestNewApp(t *testing.T) { t.Fatalf("error decoding error response: %v", err) } - if errs[0].Code != v2.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", errs[0].Code, v2.ErrorCodeUnauthorized) + err2, ok := errs[0].(errcode.ErrorCoder) + if !ok { + t.Fatalf("not an ErrorCoder: %#v", errs[0]) + } + if err2.ErrorCode() != v2.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), v2.ErrorCodeUnauthorized) } } diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index fa9f576a..e33bd3c0 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -18,12 +18,12 @@ func blobDispatcher(ctx *Context, r *http.Request) http.Handler { if err == errDigestNotAvailable { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors = append(ctx.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) + ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) }) } @@ -53,16 +53,16 @@ func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { desc, err := blobs.Stat(bh, bh.Digest) if err != nil { if err == distribution.ErrBlobUnknown { - bh.Errors = append(bh.Errors, errcode.NewError(v2.ErrorCodeBlobUnknown, bh.Digest)) + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) } else { - bh.Errors = append(bh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } return } if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) - bh.Errors = append(bh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 7e8c3962..8dc417ba 100644 --- a/docs/handlers/blobupload.go +++ 
b/docs/handlers/blobupload.go @@ -37,7 +37,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } buh.State = state @@ -45,14 +45,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if state.Name != ctx.Repository.Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } if state.UUID != buh.UUID { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } @@ -62,12 +62,12 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) if err == distribution.ErrBlobUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) }) } buh.Upload = upload @@ -81,14 +81,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { defer upload.Close() ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) }) } else if nn != buh.State.Offset { defer upload.Close() ctxu.GetLogger(ctx).Infof("seek to wrong offest: %d != %d", nn, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) }) } @@ -119,7 +119,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req blobs := buh.Repository.Blobs(buh) upload, err := blobs.Create(buh) if err != nil { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -127,7 +127,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req defer buh.Upload.Close() if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, 
errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -138,7 +138,7 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req // GetUploadStatus returns the status of a given upload, identified by id. func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } @@ -146,7 +146,7 @@ func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Req // resumable upload is supported. This will enable returning a non-zero // range for clients to begin uploading at an offset. if err := buh.blobUploadResponse(w, r, true); err != nil { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -157,13 +157,13 @@ func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Req // PatchBlobData writes data to an upload. func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } ct := r.Header.Get("Content-Type") if ct != "" && ct != "application/octet-stream" { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, fmt.Errorf("Bad Content-Type"))) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) // TODO(dmcgowan): encode error return } @@ -173,12 +173,12 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // Copy the data if _, err := io.Copy(buh.Upload, r.Body); err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } if err := buh.blobUploadResponse(w, r, false); err != nil { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -192,7 +192,7 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // url of the blob. func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } @@ -200,21 +200,21 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht if dgstStr == "" { // no digest? return error, but allow retry. - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, "digest missing")) + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) return } dgst, err := digest.ParseDigest(dgstStr) if err != nil { // no digest? return error, but allow retry. - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, "digest parsing failed")) + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) return } // Read in the data, if any. 
if _, err := io.Copy(buh.Upload, r.Body); err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -229,14 +229,14 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht if err != nil { switch err := err.(type) { case distribution.ErrBlobInvalidDigest: - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) default: switch err { case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadInvalid, err)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) default: ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } } @@ -253,7 +253,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // Build our canonical blob url blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) if err != nil { - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -266,14 +266,14 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // CancelBlobUpload cancels an in-progress upload of a blob. func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { - buh.Errors = append(buh.Errors, errcode.NewError(v2.ErrorCodeBlobUploadUnknown)) + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } w.Header().Set("Docker-Upload-UUID", buh.UUID) if err := buh.Upload.Cancel(buh); err != nil { ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) - buh.Errors = append(buh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err)) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } w.WriteHeader(http.StatusNoContent) diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index 656d2066..c72c5784 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -16,9 +16,14 @@ func serveJSON(w http.ResponseWriter, v interface{}) error { sc := http.StatusInternalServerError if errs, ok := v.(errcode.Errors); ok && len(errs) > 0 { - sc = errs[0].Code.Descriptor().HTTPStatusCode - if sc == 0 { - sc = http.StatusInternalServerError + if err, ok := errs[0].(errcode.ErrorCoder); ok { + if sc2 := err.ErrorCode().Descriptor().HTTPStatusCode; sc2 != 0 { + sc = sc2 + } + } + } else if err, ok := v.(errcode.ErrorCoder); ok { + if sc2 := err.ErrorCode().Descriptor().HTTPStatusCode; sc2 != 0 { + sc = sc2 } } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 9d025c78..41fbabc4 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -64,7 +64,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http } if err != nil { - imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeManifestUnknown, err)) + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } @@ -72,7 +72,7 @@ func (imh 
*imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http
 	if imh.Digest == "" {
 		dgst, err := digestManifest(imh, sm)
 		if err != nil {
-			imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err))
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
 			return
 		}
@@ -93,13 +93,13 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 	var manifest manifest.SignedManifest
 	if err := dec.Decode(&manifest); err != nil {
-		imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeManifestInvalid, err))
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
 		return
 	}
 
 	dgst, err := digestManifest(imh, &manifest)
 	if err != nil {
-		imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid, err))
+		imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
 		return
 	}
@@ -107,7 +107,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 	if imh.Tag != "" {
 		if manifest.Tag != imh.Tag {
 			ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag)
-			imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeTagInvalid))
+			imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid)
 			return
 		}
@@ -115,11 +115,11 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 	} else if imh.Digest != "" {
 		if dgst != imh.Digest {
 			ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", dgst, imh.Digest)
-			imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid))
+			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
 			return
 		}
 	} else {
-		imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeTagInvalid, "no tag or digest specified"))
+		imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
 		return
 	}
@@ -131,19 +131,19 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 		for _, verificationError := range err {
 			switch verificationError := verificationError.(type) {
 			case distribution.ErrManifestBlobUnknown:
-				imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeBlobUnknown, verificationError.Digest))
+				imh.Errors = append(imh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(verificationError.Digest))
 			case distribution.ErrManifestUnverified:
-				imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeManifestUnverified))
+				imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified)
 			default:
 				if verificationError == digest.ErrDigestInvalidFormat {
-					imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeDigestInvalid))
+					imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
 				} else {
-					imh.Errors = append(imh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, verificationError))
+					imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(verificationError))
 				}
 			}
 		}
 	default:
-		imh.Errors = append(imh.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err))
+		imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
 	}
 
 	return
@@ -172,7 +172,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h
 	// tag index entries a serious problem in eventually consistent storage.
 	// Once we work out schema version 2, the full deletion system will be
 	// worked out and we can add support back.
-	imh.Errors = append(imh.Errors, errcode.NewError(v2.ErrorCodeUnsupported))
+	imh.Errors = append(imh.Errors, v2.ErrorCodeUnsupported)
 }
 
 // digestManifest takes a digest of the given manifest. This belongs somewhere
diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go
index e1846cf9..00f9760e 100644
--- a/docs/handlers/tags.go
+++ b/docs/handlers/tags.go
@@ -40,9 +40,9 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		switch err := err.(type) {
 		case distribution.ErrRepositoryUnknown:
-			th.Errors = append(th.Errors, errcode.NewError(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Repository.Name()}))
+			th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()}))
 		default:
-			th.Errors = append(th.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err))
+			th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
 		}
 		return
 	}
@@ -54,7 +54,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
 		Name: th.Repository.Name(),
 		Tags: tags,
 	}); err != nil {
-		th.Errors = append(th.Errors, errcode.NewError(errcode.ErrorCodeUnknown, err))
+		th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
 		return
 	}
 }

From 5c372ded1b2e1941e24ee7b97dfe96fca314124d Mon Sep 17 00:00:00 2001
From: Ahmet Alp Balkan
Date: Thu, 11 Jun 2015 15:30:18 -0700
Subject: [PATCH 162/501] storage/driver/azure: Update vendored Azure SDK

This change refreshes the vendored Azure SDK for Go to a version that has
the latest changes. I manually vendored the new SDK
(github.com/Azure/azure-sdk-for-go) and removed the `management/` and
`core/` packages, simply because they're not used here and they carry forks
of `net/http` and `crypto/tls` for their own reasons; keeping them would
have introduced a 44k SLOC change otherwise...

This also undoes the `include_azure` build flag (Steven had removed the
driver from the imports but apparently forgot to add the build flag, so the
flag was never actually including azure. :smile: ). It should be obsolete
now anyway.

Fixes #620, #175.
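To make the rename surface concrete, here is a small illustrative snippet
(not part of the patch; the identifiers are the ones visible in the hunks
below, with the old names noted in comments):

    package main

    import (
        "fmt"

        azure "github.com/Azure/azure-sdk-for-go/storage"
    )

    func main() {
        // azure.DefaultBaseUrl / azure.DefaultApiVersion are now
        // azure.DefaultBaseURL / azure.DefaultAPIVersion.
        api, err := azure.NewClient("account", "bWlzYw==", azure.DefaultBaseURL, azure.DefaultAPIVersion, true)
        if err != nil {
            fmt.Println("client:", err)
            return
        }
        _ = api

        // azure.Block{Id: ...} is now azure.Block{ID: ...}.
        blk := azure.Block{ID: "YmxvY2sx", Status: azure.BlockStatusCommitted}
        fmt.Println(blk.ID)

        // Likewise, client.GetBlobUrl is now client.GetBlobURL, and the
        // error type azure.StorageServiceError is now
        // azure.AzureStorageServiceError.
    }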
Signed-off-by: Ahmet Alp Balkan --- docs/storage/driver/azure/azure.go | 23 +++++++++++++------ docs/storage/driver/azure/blockblob.go | 2 +- docs/storage/driver/azure/blockblob_test.go | 8 +++---- docs/storage/driver/azure/blockid.go | 2 +- docs/storage/driver/azure/blockid_test.go | 2 +- docs/storage/driver/azure/randomwriter.go | 14 +++++------ .../storage/driver/azure/randomwriter_test.go | 2 +- 7 files changed, 31 insertions(+), 22 deletions(-) diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index d21a8259..cbb95981 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -16,7 +16,7 @@ import ( "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) const driverName = "azure" @@ -68,7 +68,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { realm, ok := parameters[paramRealm] if !ok || fmt.Sprint(realm) == "" { - realm = azure.DefaultBaseUrl + realm = azure.DefaultBaseURL } return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) @@ -76,7 +76,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // New constructs a new Driver with the given Azure Storage Account credentials func New(accountName, accountKey, container, realm string) (*Driver, error) { - api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultApiVersion, true) + api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) if err != nil { return nil, err } @@ -89,7 +89,7 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { } d := &driver{ - client: *blobClient, + client: blobClient, container: container} return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil } @@ -114,7 +114,16 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) + if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { + return err + } + if err := d.client.CreateBlockBlob(d.container, path); err != nil { + return err + } + bs := newAzureBlockStorage(d.client) + bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) + _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) + return err } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a @@ -233,7 +242,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. 
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath) + sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) if err != nil { if is404(err) { @@ -352,6 +361,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { } func is404(err error) bool { - e, ok := err.(azure.StorageServiceError) + e, ok := err.(azure.AzureStorageServiceError) return ok && e.StatusCode == http.StatusNotFound } diff --git a/docs/storage/driver/azure/blockblob.go b/docs/storage/driver/azure/blockblob.go index 10b2bf21..1c1df899 100644 --- a/docs/storage/driver/azure/blockblob.go +++ b/docs/storage/driver/azure/blockblob.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) // azureBlockStorage is adaptor between azure.BlobStorageClient and diff --git a/docs/storage/driver/azure/blockblob_test.go b/docs/storage/driver/azure/blockblob_test.go index c29b4742..7ce47195 100644 --- a/docs/storage/driver/azure/blockblob_test.go +++ b/docs/storage/driver/azure/blockblob_test.go @@ -6,7 +6,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) type StorageSimulator struct { @@ -122,12 +122,12 @@ func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.B var blockIDs []string for _, v := range blocks { - bl, ok := bb.blocks[v.Id] + bl, ok := bb.blocks[v.ID] if !ok { // check if block ID exists - return fmt.Errorf("Block id '%s' not found", v.Id) + return fmt.Errorf("Block id '%s' not found", v.ID) } bl.committed = true - blockIDs = append(blockIDs, v.Id) + blockIDs = append(blockIDs, v.ID) } // Mark all other blocks uncommitted diff --git a/docs/storage/driver/azure/blockid.go b/docs/storage/driver/azure/blockid.go index f6bda6a8..776c7cd5 100644 --- a/docs/storage/driver/azure/blockid.go +++ b/docs/storage/driver/azure/blockid.go @@ -7,7 +7,7 @@ import ( "sync" "time" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) type blockIDGenerator struct { diff --git a/docs/storage/driver/azure/blockid_test.go b/docs/storage/driver/azure/blockid_test.go index 6569e15d..aab70202 100644 --- a/docs/storage/driver/azure/blockid_test.go +++ b/docs/storage/driver/azure/blockid_test.go @@ -4,7 +4,7 @@ import ( "math" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) func Test_blockIdGenerator(t *testing.T) { diff --git a/docs/storage/driver/azure/randomwriter.go b/docs/storage/driver/azure/randomwriter.go index b570d559..f18692d0 100644 --- a/docs/storage/driver/azure/randomwriter.go +++ b/docs/storage/driver/azure/randomwriter.go @@ -5,7 +5,7 @@ import ( "io" "io/ioutil" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) // blockStorage is the interface required from a block storage service @@ -75,7 +75,7 @@ func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chu // Use existing block list var existingBlocks []azure.Block for _, v := range blocks.CommittedBlocks { - existingBlocks = append(existingBlocks, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) + existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: 
azure.BlockStatusCommitted}) } blockList = append(existingBlocks, blockList...) } @@ -111,7 +111,7 @@ func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.R if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { return newBlocks, nn, err } - newBlocks = append(newBlocks, azure.Block{Id: blockID, Status: azure.BlockStatusUncommitted}) + newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted}) } return newBlocks, nn, nil } @@ -131,7 +131,7 @@ func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset in for _, v := range bx.CommittedBlocks { blkSize := int64(v.Size) if o >= blkSize { // use existing block - left = append(left, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) + left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) o -= blkSize elapsed += blkSize } else if o > 0 { // current block needs to be splitted @@ -150,7 +150,7 @@ func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset in if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { return left, err } - left = append(left, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) + left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) break } } @@ -177,7 +177,7 @@ func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset i ) if bs > re { // take the block as is - right = append(right, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) + right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) } else if be > re { // current block needs to be splitted part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) if err != nil { @@ -192,7 +192,7 @@ func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset i if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { return right, err } - right = append(right, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) + right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) } elapsed += int64(v.Size) } diff --git a/docs/storage/driver/azure/randomwriter_test.go b/docs/storage/driver/azure/randomwriter_test.go index 2c7480db..32c2509e 100644 --- a/docs/storage/driver/azure/randomwriter_test.go +++ b/docs/storage/driver/azure/randomwriter_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - azure "github.com/MSOpenTech/azure-sdk-for-go/storage" + azure "github.com/Azure/azure-sdk-for-go/storage" ) func TestRandomWriter_writeChunkToBlocks(t *testing.T) { From f9e152d912ea660e06aec313f519bf5ff62720da Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 9 Jun 2015 10:46:18 +0800 Subject: [PATCH 163/501] Ensure that rados is disabled without build tag This ensures that rados is not required when building the registry. This was slightly tricky in that when the flags were applied, the rados package was completely missing. This led to a problem where rados was basically unlistable and untestable as a package. This was fixed by simply adding a doc.go file that is included whether rados is built or not. 
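For illustration, the resulting split looks like this (a sketch mirroring
the hunks below; doc.go carries no build tag, while the driver and test
files do):

    // doc.go -- always compiled, so the package stays listable and testable:

    // Package rados implements the rados storage driver backend. Support can be
    // enabled by including the "include_rados" build tag.
    package rados

    // rados.go and rados_test.go -- compiled only with
    // `go build -tags include_rados`:

    // +build include_rados

    package rados

Note that a build constraint must precede the package clause and be
followed by a blank line, otherwise Go treats it as an ordinary comment.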
Signed-off-by: Stephen J Day --- docs/storage/driver/rados/doc.go | 3 +++ docs/storage/driver/rados/rados.go | 2 ++ docs/storage/driver/rados/rados_test.go | 2 ++ 3 files changed, 7 insertions(+) create mode 100644 docs/storage/driver/rados/doc.go diff --git a/docs/storage/driver/rados/doc.go b/docs/storage/driver/rados/doc.go new file mode 100644 index 00000000..655c68a3 --- /dev/null +++ b/docs/storage/driver/rados/doc.go @@ -0,0 +1,3 @@ +// Package rados implements the rados storage driver backend. Support can be +// enabled by including the "include_rados" build tag. +package rados diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go index 9bac8fc3..0ea10a89 100644 --- a/docs/storage/driver/rados/rados.go +++ b/docs/storage/driver/rados/rados.go @@ -1,3 +1,5 @@ +// +build include_rados + package rados import ( diff --git a/docs/storage/driver/rados/rados_test.go b/docs/storage/driver/rados/rados_test.go index 29486e89..d408519b 100644 --- a/docs/storage/driver/rados/rados_test.go +++ b/docs/storage/driver/rados/rados_test.go @@ -1,3 +1,5 @@ +// +build include_rados + package rados import ( From cff1a5ffdcca2ca5cc348eefa3be5d2999d52bb9 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Tue, 16 Jun 2015 18:57:47 -0700 Subject: [PATCH 164/501] Move challenge http status code logic See: https://github.com/docker/distribution/blob/d796729b6bb527689219b9547cbf98595058409d/registry/handlers/app.go#L498 Per the comment on line 498, this moves the logic of setting the http status code into the serveJSON func, leaving the auth.Challenge.ServeHTTP() func to just set the auth challenge header. Signed-off-by: Doug Davis --- docs/api/v2/errors.go | 2 +- docs/auth/auth.go | 8 ++++---- docs/auth/htpasswd/access.go | 1 - docs/auth/htpasswd/access_test.go | 1 + docs/auth/silly/access.go | 1 - docs/auth/silly/access_test.go | 1 + docs/auth/token/accesscontroller.go | 3 +-- docs/handlers/app.go | 11 +---------- 8 files changed, 9 insertions(+), 19 deletions(-) diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index 14684560..87e27f2e 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -24,7 +24,7 @@ var ( Description: `The access controller denied access for the operation on a resource. Often this will be accompanied by a 401 Unauthorized response status.`, - HTTPStatusCode: http.StatusForbidden, + HTTPStatusCode: http.StatusUnauthorized, }) // ErrorCodeDigestInvalid is returned when uploading a blob if the diff --git a/docs/auth/auth.go b/docs/auth/auth.go index ec82b469..3107537e 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -62,10 +62,10 @@ type Access struct { type Challenge interface { error // ServeHTTP prepares the request to conduct the appropriate challenge - // response. For most implementations, simply calling ServeHTTP should be - // sufficient. Because no body is written, users may write a custom body after - // calling ServeHTTP, but any headers must be written before the call and may - // be overwritten. + // response by adding the appropriate HTTP challenge header on the response + // message. Callers are expected to set the appropriate HTTP status code + // (e.g. 401) themselves. Because no body is written, users may write a + // custom body after calling ServeHTTP. 
ServeHTTP(w http.ResponseWriter, r *http.Request) } diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go index 5425b1da..b8c4d41e 100644 --- a/docs/auth/htpasswd/access.go +++ b/docs/auth/htpasswd/access.go @@ -90,7 +90,6 @@ type challenge struct { func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { header := fmt.Sprintf("Basic realm=%q", ch.realm) w.Header().Set("WWW-Authenticate", header) - w.WriteHeader(http.StatusUnauthorized) } func (ch *challenge) Error() string { diff --git a/docs/auth/htpasswd/access_test.go b/docs/auth/htpasswd/access_test.go index ea0de425..79e9422c 100644 --- a/docs/auth/htpasswd/access_test.go +++ b/docs/auth/htpasswd/access_test.go @@ -49,6 +49,7 @@ func TestBasicAccessController(t *testing.T) { switch err := err.(type) { case auth.Challenge: err.ServeHTTP(w, r) + w.WriteHeader(http.StatusUnauthorized) return default: t.Fatalf("unexpected error authorizing request: %v", err) diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go index 39318d1a..7ae43e25 100644 --- a/docs/auth/silly/access.go +++ b/docs/auth/silly/access.go @@ -83,7 +83,6 @@ func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { } w.Header().Set("WWW-Authenticate", header) - w.WriteHeader(http.StatusUnauthorized) } func (ch *challenge) Error() string { diff --git a/docs/auth/silly/access_test.go b/docs/auth/silly/access_test.go index d579e878..2fd160de 100644 --- a/docs/auth/silly/access_test.go +++ b/docs/auth/silly/access_test.go @@ -22,6 +22,7 @@ func TestSillyAccessController(t *testing.T) { switch err := err.(type) { case auth.Challenge: err.ServeHTTP(w, r) + w.WriteHeader(http.StatusUnauthorized) return default: t.Fatalf("unexpected error authorizing request: %v", err) diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go index 4547336a..c947b67d 100644 --- a/docs/auth/token/accesscontroller.go +++ b/docs/auth/token/accesscontroller.go @@ -117,10 +117,9 @@ func (ac *authChallenge) SetHeader(header http.Header) { } // ServeHttp handles writing the challenge response -// by setting the challenge header and status code. +// by setting the challenge header. func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { ac.SetHeader(w.Header()) - w.WriteHeader(ac.Status()) } // accessController implements the auth.AccessController interface. diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f7b7c8c4..d3985067 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -495,16 +495,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont if err != nil { switch err := err.(type) { case auth.Challenge: - // NOTE(duglin): - // Since err.ServeHTTP will set the HTTP status code for us - // we need to set the content-type here. The serveJSON - // func will try to do it but it'll be too late at that point. - // I would have have preferred to just have the auth.Challenge - // ServerHTTP func just add the WWW-Authenticate header and let - // serveJSON set the HTTP status code and content-type but I wasn't - // sure if that's an ok design change. STEVVOOE ? 
- w.Header().Set("Content-Type", "application/json; charset=utf-8") - + // Add the appropriate WWW-Auth header err.ServeHTTP(w, r) var errs errcode.Errors From 365de1b215b9d263551f3d51f522534fd8b58d23 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 18 Jun 2015 18:00:26 -0700 Subject: [PATCH 165/501] Add back in the "errors" wrapper in the Errors serialization See: https://github.com/docker/distribution/pull/548/files#r32794064 Signed-off-by: Doug Davis --- docs/api/errcode/errors.go | 12 ++++++++---- docs/api/errcode/errors_test.go | 4 ++-- docs/client/blob_writer_test.go | 14 ++++++++------ 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index cf186cfb..a68aaad5 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -172,7 +172,9 @@ type jsonError struct { // MarshalJSON converts slice of error, ErrorCode or Error into a // slice of Error - then serializes func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs []jsonError + var tmpErrs struct { + Errors []jsonError `json:"errors,omitempty"` + } for _, daErr := range errs { var err Error @@ -187,7 +189,7 @@ func (errs Errors) MarshalJSON() ([]byte, error) { } - tmpErrs = append(tmpErrs, jsonError{ + tmpErrs.Errors = append(tmpErrs.Errors, jsonError{ Code: err.Code, Message: err.Message(), Detail: err.Detail, @@ -200,14 +202,16 @@ func (errs Errors) MarshalJSON() ([]byte, error) { // UnmarshalJSON deserializes []Error and then converts it into slice of // Error or ErrorCode func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs []jsonError + var tmpErrs struct { + Errors []jsonError + } if err := json.Unmarshal(data, &tmpErrs); err != nil { return err } var newErrs Errors - for _, daErr := range tmpErrs { + for _, daErr := range tmpErrs.Errors { if daErr.Detail == nil { // Error's w/o details get converted to ErrorCode newErrs = append(newErrs, daErr.Code) diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go index d89c0253..684e263a 100644 --- a/docs/api/errcode/errors_test.go +++ b/docs/api/errcode/errors_test.go @@ -89,7 +89,7 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("error marashaling errors: %v", err) } - expectedJSON := "[{\"code\":\"TEST1\",\"message\":\"test error 1\"},{\"code\":\"TEST2\",\"message\":\"test error 2\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]" + expectedJSON := "{\"errors\":[{\"code\":\"TEST1\",\"message\":\"test error 1\"},{\"code\":\"TEST2\",\"message\":\"test error 2\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}" if string(p) != expectedJSON { t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) @@ -107,7 +107,7 @@ func TestErrorsManagement(t *testing.T) { // Test again with a single value this time errs = Errors{ErrorCodeUnknown} - expectedJSON = "[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]" + expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" p, err = json.Marshal(errs) if err != nil { diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index eeb9f53d..8436ca9a 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -86,12 +86,14 @@ func TestUploadReadFrom(t *testing.T) { Response: testutil.Response{ StatusCode: http.StatusBadRequest, Body: []byte(` - [ - { - "code": "BLOB_UPLOAD_INVALID", - "detail": "more detail" - } - ] `), + { "errors": + [ + { + "code": "BLOB_UPLOAD_INVALID", + "detail": 
"more detail" + } + ] + } `), }, }, // Test 400 invalid json From 805b135bcc896e03d957f37ce401a0f4ca0f5883 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 18 Jun 2015 18:24:54 -0700 Subject: [PATCH 166/501] Add 'message' back to BlobTest sample json Signed-off-by: Doug Davis --- docs/client/blob_writer_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index 8436ca9a..e3c880e1 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -90,6 +90,7 @@ func TestUploadReadFrom(t *testing.T) { [ { "code": "BLOB_UPLOAD_INVALID", + "message": "invalid upload identifier", "detail": "more detail" } ] From 6bedf7d1cd00223b0f3e81eabf78dbd2148382a7 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Thu, 18 Jun 2015 16:56:05 -0700 Subject: [PATCH 167/501] Add Etag header for manifests. Return 304 (Not Modified) if retrieved with If-None-Match header Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 29 +++++++++++++++++++++++++++++ docs/handlers/images.go | 18 ++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 9952d68e..8d631941 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -449,6 +449,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{dgst.String()}, }) var fetchedManifest manifest.SignedManifest @@ -470,6 +471,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{dgst.String()}, }) var fetchedManifestByDigest manifest.SignedManifest @@ -482,6 +484,33 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("manifests do not match") } + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + // Ensure that the tag is listed. 
 	resp, err = http.Get(tagsURL)
 	if err != nil {
diff --git a/docs/handlers/images.go b/docs/handlers/images.go
index 41fbabc4..747b2780 100644
--- a/docs/handlers/images.go
+++ b/docs/handlers/images.go
@@ -60,6 +60,10 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http
 	if imh.Tag != "" {
 		sm, err = manifests.GetByTag(imh.Tag)
 	} else {
+		if etagMatch(r, imh.Digest.String()) {
+			w.WriteHeader(http.StatusNotModified)
+			return
+		}
 		sm, err = manifests.Get(imh.Digest)
 	}
@@ -75,6 +79,10 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http
 			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
 			return
 		}
+		if etagMatch(r, dgst.String()) {
+			w.WriteHeader(http.StatusNotModified)
+			return
+		}
 		imh.Digest = dgst
 	}
@@ -82,9 +90,19 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw)))
 	w.Header().Set("Docker-Content-Digest", imh.Digest.String())
+	w.Header().Set("Etag", imh.Digest.String())
 	w.Write(sm.Raw)
 }
 
+func etagMatch(r *http.Request, etag string) bool {
+	for _, headerVal := range r.Header["If-None-Match"] {
+		if headerVal == etag {
+			return true
+		}
+	}
+	return false
+}
+
 // PutImageManifest validates and stores an image in the registry.
 func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
 	ctxu.GetLogger(imh).Debug("PutImageManifest")

From 6d46ae5fdb72d07dc077cac6a0c1c36d988d9ac4 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Mon, 29 Jun 2015 16:44:06 -0700
Subject: [PATCH 168/501] Prevent the ErrUnsupportedMethod error from being
 returned up the stack. It eventually causes the Go http library to call
 WriteHeader() twice, which is an error

Signed-off-by: Richard Scothern
---
 docs/storage/blobserver.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go
index 065453e6..a7b42681 100644
--- a/docs/storage/blobserver.go
+++ b/docs/storage/blobserver.go
@@ -65,6 +65,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h
 	}
 
 	http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
+	return nil
 }
 
 // Some unexpected error.

From 6167220cdddac3589205ef49e81dab311a35a287 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 29 Jun 2015 16:39:45 -0700
Subject: [PATCH 169/501] Remove half-baked Storage Driver IPC support

This removes documentation and code related to IPC based storage driver
plugins. The existence of this functionality was an original feature goal,
but it is now unmaintained and actively confuses incoming contributions. We
will likely explore some driver plugin mechanism in the future, but we
don't need this lying around in the meantime.
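With the IPC path gone, in-process factory registration is the only way a
driver gets loaded. A minimal sketch of what remains, assuming the
in-memory driver registers itself with the factory in its package init
(the blank-import pattern used elsewhere in this repo); Register, Create
and InvalidStorageDriverError are the factory API shown in the hunks below:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/storage/driver/factory"
        // Drivers register themselves in their package init, so a blank
        // import is enough to make them available by name:
        _ "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        // In-process lookup is now the only path; an unregistered name
        // fails fast with factory.InvalidStorageDriverError instead of
        // attempting to spawn an external IPC driver process.
        d, err := factory.Create("inmemory", nil)
        if err != nil {
            fmt.Println("create:", err)
            return
        }
        fmt.Println("got driver:", d)
    }

The test suites collapse the same way: the RegisterInProcessSuite and
commented-out RegisterIPCSuite calls become a single RegisterSuite.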
Signed-off-by: Stephen J Day --- docs/storage/driver/azure/azure_test.go | 8 +- docs/storage/driver/factory/factory.go | 24 +- docs/storage/driver/filesystem/driver_test.go | 5 +- docs/storage/driver/inmemory/driver_test.go | 7 +- docs/storage/driver/ipc/client.go | 454 ------------------ docs/storage/driver/ipc/ipc.go | 148 ------ docs/storage/driver/ipc/server.go | 178 ------- docs/storage/driver/rados/rados_test.go | 2 +- docs/storage/driver/s3/s3_test.go | 88 ++-- docs/storage/driver/testsuites/testsuites.go | 46 +- 10 files changed, 49 insertions(+), 911 deletions(-) delete mode 100644 docs/storage/driver/ipc/client.go delete mode 100644 docs/storage/driver/ipc/ipc.go delete mode 100644 docs/storage/driver/ipc/server.go diff --git a/docs/storage/driver/azure/azure_test.go b/docs/storage/driver/azure/azure_test.go index 4990ba19..4a0661b3 100644 --- a/docs/storage/driver/azure/azure_test.go +++ b/docs/storage/driver/azure/azure_test.go @@ -59,11 +59,5 @@ func init() { return "" } - testsuites.RegisterInProcessSuite(azureDriverConstructor, skipCheck) - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // paramAccountName: accountName, - // paramAccountKey: accountKey, - // paramContainer: container, - // paramRealm: realm, - // }, skipCheck) + testsuites.RegisterSuite(azureDriverConstructor, skipCheck) } diff --git a/docs/storage/driver/factory/factory.go b/docs/storage/driver/factory/factory.go index 66d160f3..e84f0026 100644 --- a/docs/storage/driver/factory/factory.go +++ b/docs/storage/driver/factory/factory.go @@ -33,30 +33,14 @@ func Register(name string, factory StorageDriverFactory) { driverFactories[name] = factory } -// Create a new storagedriver.StorageDriver with the given name and parameters -// To run in-process, the StorageDriverFactory must first be registered with the given name -// If no in-process drivers are found with the given name, this attempts to create an IPC driver -// If no in-process or external drivers are found, an InvalidStorageDriverError is returned +// Create a new storagedriver.StorageDriver with the given name and +// parameters. To use a driver, the StorageDriverFactory must first be +// registered with the given name. If no drivers are found, an +// InvalidStorageDriverError is returned func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { driverFactory, ok := driverFactories[name] if !ok { return nil, InvalidStorageDriverError{name} - - // NOTE(stevvooe): We are disabling storagedriver ipc for now, as the - // server and client need to be updated for the changed API calls and - // there were some problems libchan hanging. We'll phase this - // functionality back in over the next few weeks. 
- - // No registered StorageDriverFactory found, try ipc - // driverClient, err := ipc.NewDriverClient(name, parameters) - // if err != nil { - // return nil, InvalidStorageDriverError{name} - // } - // err = driverClient.Start() - // if err != nil { - // return nil, err - // } - // return driverClient, nil } return driverFactory.Create(parameters) } diff --git a/docs/storage/driver/filesystem/driver_test.go b/docs/storage/driver/filesystem/driver_test.go index 8572de16..8b48b431 100644 --- a/docs/storage/driver/filesystem/driver_test.go +++ b/docs/storage/driver/filesystem/driver_test.go @@ -20,10 +20,7 @@ func init() { } defer os.Remove(root) - testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return New(root), nil }, testsuites.NeverSkip) - - // BUG(stevvooe): IPC is broken so we're disabling for now. Will revisit later. - // testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": root}, testsuites.NeverSkip) } diff --git a/docs/storage/driver/inmemory/driver_test.go b/docs/storage/driver/inmemory/driver_test.go index a02ff23e..dbc1916f 100644 --- a/docs/storage/driver/inmemory/driver_test.go +++ b/docs/storage/driver/inmemory/driver_test.go @@ -5,7 +5,6 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - "gopkg.in/check.v1" ) @@ -16,9 +15,5 @@ func init() { inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { return New(), nil } - testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip) - - // BUG(stevvooe): Disable flaky IPC tests for now when we can troubleshoot - // the problems with libchan. - // testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip) + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) } diff --git a/docs/storage/driver/ipc/client.go b/docs/storage/driver/ipc/client.go deleted file mode 100644 index daa823d7..00000000 --- a/docs/storage/driver/ipc/client.go +++ /dev/null @@ -1,454 +0,0 @@ -// +build ignore - -package ipc - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "syscall" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" - "github.com/docker/libchan/spdy" -) - -// StorageDriverExecutablePrefix is the prefix which the IPC storage driver -// loader expects driver executables to begin with. For example, the s3 driver -// should be named "registry-storagedriver-s3". 
-const StorageDriverExecutablePrefix = "registry-storagedriver-" - -// StorageDriverClient is a storagedriver.StorageDriver implementation using a -// managed child process communicating over IPC using libchan with a unix domain -// socket -type StorageDriverClient struct { - subprocess *exec.Cmd - exitChan chan error - exitErr error - stopChan chan struct{} - socket *os.File - transport *spdy.Transport - sender libchan.Sender - version storagedriver.Version -} - -// NewDriverClient constructs a new out-of-process storage driver using the -// driver name and configuration parameters -// A user must call Start on this driver client before remote method calls can -// be made -// -// Looks for drivers in the following locations in order: -// - Storage drivers directory (to be determined, yet not implemented) -// - $GOPATH/bin -// - $PATH -func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) { - paramsBytes, err := json.Marshal(parameters) - if err != nil { - return nil, err - } - - driverExecName := StorageDriverExecutablePrefix + name - driverPath, err := exec.LookPath(driverExecName) - if err != nil { - return nil, err - } - - command := exec.Command(driverPath, string(paramsBytes)) - - return &StorageDriverClient{ - subprocess: command, - }, nil -} - -// Start starts the designated child process storage driver and binds a socket -// to this process for IPC method calls -func (driver *StorageDriverClient) Start() error { - driver.exitErr = nil - driver.exitChan = make(chan error) - driver.stopChan = make(chan struct{}) - - fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) - if err != nil { - return err - } - - childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket") - driver.socket = os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") - - driver.subprocess.Stdout = os.Stdout - driver.subprocess.Stderr = os.Stderr - driver.subprocess.ExtraFiles = []*os.File{childSocket} - - if err = driver.subprocess.Start(); err != nil { - driver.Stop() - return err - } - - go driver.handleSubprocessExit() - - if err = childSocket.Close(); err != nil { - driver.Stop() - return err - } - - connection, err := net.FileConn(driver.socket) - if err != nil { - driver.Stop() - return err - } - driver.transport, err = spdy.NewClientTransport(connection) - if err != nil { - driver.Stop() - return err - } - driver.sender, err = driver.transport.NewSendChannel() - if err != nil { - driver.Stop() - return err - } - - // Check the driver's version to determine compatibility - receiver, remoteSender := libchan.Pipe() - err = driver.sender.Send(&Request{Type: "Version", ResponseChannel: remoteSender}) - if err != nil { - driver.Stop() - return err - } - - var response VersionResponse - err = receiver.Receive(&response) - if err != nil { - driver.Stop() - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - driver.version = response.Version - - if driver.version.Major() != storagedriver.CurrentVersion.Major() || driver.version.Minor() > storagedriver.CurrentVersion.Minor() { - return IncompatibleVersionError{driver.version} - } - - return nil -} - -// Stop stops the child process storage driver -// storagedriver.StorageDriver methods called after Stop will fail -func (driver *StorageDriverClient) Stop() error { - var closeSenderErr, closeTransportErr, closeSocketErr, killErr error - - if driver.sender != nil { - closeSenderErr = driver.sender.Close() - } - if driver.transport != nil { - 
closeTransportErr = driver.transport.Close() - } - if driver.socket != nil { - closeSocketErr = driver.socket.Close() - } - if driver.subprocess != nil { - killErr = driver.subprocess.Process.Kill() - } - if driver.stopChan != nil { - close(driver.stopChan) - } - - if closeSenderErr != nil { - return closeSenderErr - } else if closeTransportErr != nil { - return closeTransportErr - } else if closeSocketErr != nil { - return closeSocketErr - } - - return killErr -} - -// Implement the storagedriver.StorageDriver interface over IPC - -// GetContent retrieves the content stored at "path" as a []byte. -func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "GetContent", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ReadStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - defer response.Reader.Close() - contents, err := ioutil.ReadAll(response.Reader) - if err != nil { - return nil, err - } - return contents, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (driver *StorageDriverClient) PutContent(path string, contents []byte) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - - params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))} - err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(WriteStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (driver *StorageDriverClient) ReadStream(path string, offset int64) (io.ReadCloser, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset} - err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ReadStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - return response.Reader, nil -} - -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. 
-func (driver *StorageDriverClient) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": reader} - err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(WriteStreamResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// CurrentSize retrieves the curernt size in bytes of the object at the given -// path. -func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { - if err := driver.exited(); err != nil { - return 0, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return 0, err - } - - response := new(CurrentSizeResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return 0, err - } - - if response.Error != nil { - return 0, response.Error.Unwrap() - } - - return response.Position, nil -} - -// List returns a list of the objects that are direct descendants of the given -// path. -func (driver *StorageDriverClient) List(path string) ([]string, error) { - if err := driver.exited(); err != nil { - return nil, err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return nil, err - } - - response := new(ListResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, response.Error.Unwrap() - } - - return response.Keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath} - err := driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(MoveResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (driver *StorageDriverClient) Delete(path string) error { - if err := driver.exited(); err != nil { - return err - } - - receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender}) - if err != nil { - return err - } - - response := new(DeleteResponse) - err = driver.receiveResponse(receiver, response) - if err != nil { - return err - } - - if response.Error != nil { - return response.Error.Unwrap() - } - - return nil -} - -// handleSubprocessExit populates the exit channel until we have explicitly -// stopped the storage driver subprocess -// Requests can select on driver.exitChan and response receiving and not hang if -// the process exits -func (driver *StorageDriverClient) handleSubprocessExit() { - exitErr := driver.subprocess.Wait() - if exitErr == nil { - exitErr = fmt.Errorf("Storage driver subprocess already exited cleanly") - } else { - exitErr = fmt.Errorf("Storage driver subprocess exited with error: %s", exitErr) - } - - driver.exitErr = exitErr - - for { - select { - case driver.exitChan <- exitErr: - case <-driver.stopChan: - close(driver.exitChan) - return - } - } -} - -// receiveResponse populates the response value with the next result from the -// given receiver, or returns an error if receiving failed or the driver has -// stopped -func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error { - receiveChan := make(chan error, 1) - go func(receiver libchan.Receiver, receiveChan chan<- error) { - receiveChan <- receiver.Receive(response) - }(receiver, receiveChan) - - var err error - var ok bool - select { - case err = <-receiveChan: - case err, ok = <-driver.exitChan: - if !ok { - err = driver.exitErr - } - } - - return err -} - -// exited returns an exit error if the driver has exited or nil otherwise -func (driver *StorageDriverClient) exited() error { - select { - case err, ok := <-driver.exitChan: - if !ok { - return driver.exitErr - } - return err - default: - return nil - } -} diff --git a/docs/storage/driver/ipc/ipc.go b/docs/storage/driver/ipc/ipc.go deleted file mode 100644 index dabb834d..00000000 --- a/docs/storage/driver/ipc/ipc.go +++ /dev/null @@ -1,148 +0,0 @@ -// +build ignore - -package ipc - -import ( - "fmt" - "io" - "reflect" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" -) - -// StorageDriver is the interface which IPC storage drivers must implement. As external storage -// drivers may be defined to use a different version of the storagedriver.StorageDriver interface, -// we use an additional version check to determine compatiblity. 
-type StorageDriver interface { - // Version returns the storagedriver.StorageDriver interface version which this storage driver - // implements, which is used to determine driver compatibility - Version() (storagedriver.Version, error) -} - -// IncompatibleVersionError is returned when a storage driver is using an incompatible version of -// the storagedriver.StorageDriver api -type IncompatibleVersionError struct { - version storagedriver.Version -} - -func (e IncompatibleVersionError) Error() string { - return fmt.Sprintf("Incompatible storage driver version: %s", e.version) -} - -// Request defines a remote method call request -// A return value struct is to be sent over the ResponseChannel -type Request struct { - Type string `codec:",omitempty"` - Parameters map[string]interface{} `codec:",omitempty"` - ResponseChannel libchan.Sender `codec:",omitempty"` -} - -// ResponseError is a serializable error type. -// The Type and Parameters may be used to reconstruct the same error on the -// client side, falling back to using the Type and Message if this cannot be -// done. -type ResponseError struct { - Type string `codec:",omitempty"` - Message string `codec:",omitempty"` - Parameters map[string]interface{} `codec:",omitempty"` -} - -// WrapError wraps an error in a serializable struct containing the error's type -// and message. -func WrapError(err error) *ResponseError { - if err == nil { - return nil - } - v := reflect.ValueOf(err) - re := ResponseError{ - Type: v.Type().String(), - Message: err.Error(), - } - - if v.Kind() == reflect.Struct { - re.Parameters = make(map[string]interface{}) - for i := 0; i < v.NumField(); i++ { - field := v.Type().Field(i) - re.Parameters[field.Name] = v.Field(i).Interface() - } - } - return &re -} - -// Unwrap returns the underlying error if it can be reconstructed, or the -// original ResponseError otherwise. 
-func (err *ResponseError) Unwrap() error { - var errVal reflect.Value - var zeroVal reflect.Value - - switch err.Type { - case "storagedriver.PathNotFoundError": - errVal = reflect.ValueOf(&storagedriver.PathNotFoundError{}) - case "storagedriver.InvalidOffsetError": - errVal = reflect.ValueOf(&storagedriver.InvalidOffsetError{}) - } - if errVal == zeroVal { - return err - } - - for k, v := range err.Parameters { - fieldVal := errVal.Elem().FieldByName(k) - if fieldVal == zeroVal { - return err - } - fieldVal.Set(reflect.ValueOf(v)) - } - - if unwrapped, ok := errVal.Elem().Interface().(error); ok { - return unwrapped - } - - return err - -} - -func (err *ResponseError) Error() string { - return fmt.Sprintf("%s: %s", err.Type, err.Message) -} - -// IPC method call response object definitions - -// VersionResponse is a response for a Version request -type VersionResponse struct { - Version storagedriver.Version `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// ReadStreamResponse is a response for a ReadStream request -type ReadStreamResponse struct { - Reader io.ReadCloser `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// WriteStreamResponse is a response for a WriteStream request -type WriteStreamResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -// CurrentSizeResponse is a response for a CurrentSize request -type CurrentSizeResponse struct { - Position uint64 `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// ListResponse is a response for a List request -type ListResponse struct { - Keys []string `codec:",omitempty"` - Error *ResponseError `codec:",omitempty"` -} - -// MoveResponse is a response for a Move request -type MoveResponse struct { - Error *ResponseError `codec:",omitempty"` -} - -// DeleteResponse is a response for a Delete request -type DeleteResponse struct { - Error *ResponseError `codec:",omitempty"` -} diff --git a/docs/storage/driver/ipc/server.go b/docs/storage/driver/ipc/server.go deleted file mode 100644 index 1752f12b..00000000 --- a/docs/storage/driver/ipc/server.go +++ /dev/null @@ -1,178 +0,0 @@ -// +build ignore - -package ipc - -import ( - "bytes" - "io" - "io/ioutil" - "net" - "os" - "reflect" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/libchan" - "github.com/docker/libchan/spdy" -) - -// StorageDriverServer runs a new IPC server handling requests for the given -// storagedriver.StorageDriver -// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in -// client.go -// -// To create a new out-of-process driver, create a main package which calls StorageDriverServer with -// a storagedriver.StorageDriver -func StorageDriverServer(driver storagedriver.StorageDriver) error { - childSocket := os.NewFile(3, "childSocket") - defer childSocket.Close() - conn, err := net.FileConn(childSocket) - if err != nil { - panic(err) - } - defer conn.Close() - if transport, err := spdy.NewServerTransport(conn); err != nil { - panic(err) - } else { - for { - receiver, err := transport.WaitReceiveChannel() - if err == io.EOF { - return nil - } else if err != nil { - panic(err) - } - go receive(driver, receiver) - } - } -} - -// receive receives new storagedriver.StorageDriver method requests and creates a new goroutine to -// handle each request -// Requests are expected to be of type ipc.Request as the parameters are unknown until the request -// type is deserialized -func receive(driver 
storagedriver.StorageDriver, receiver libchan.Receiver) { - for { - var request Request - err := receiver.Receive(&request) - if err == io.EOF { - return - } else if err != nil { - panic(err) - } - go handleRequest(driver, request) - } -} - -// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go -// Responds to requests using the Request.ResponseChannel -func handleRequest(driver storagedriver.StorageDriver, request Request) { - switch request.Type { - case "Version": - err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion}) - if err != nil { - panic(err) - } - case "GetContent": - path, _ := request.Parameters["Path"].(string) - content, err := driver.GetContent(path) - var response ReadStreamResponse - if err != nil { - response = ReadStreamResponse{Error: WrapError(err)} - } else { - response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))} - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "PutContent": - path, _ := request.Parameters["Path"].(string) - reader, _ := request.Parameters["Reader"].(io.ReadCloser) - contents, err := ioutil.ReadAll(reader) - defer reader.Close() - if err == nil { - err = driver.PutContent(path, contents) - } - response := WriteStreamResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "ReadStream": - path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be converted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - reader, err := driver.ReadStream(path, offset) - var response ReadStreamResponse - if err != nil { - response = ReadStreamResponse{Error: WrapError(err)} - } else { - response = ReadStreamResponse{Reader: reader} - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "WriteStream": - path, _ := request.Parameters["Path"].(string) - // Depending on serialization method, Offset may be converted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() - // Depending on serialization method, Size may be converted to any int/uint type - size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int() - reader, _ := request.Parameters["Reader"].(io.ReadCloser) - err := driver.WriteStream(path, offset, size, reader) - response := WriteStreamResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "CurrentSize": - path, _ := request.Parameters["Path"].(string) - position, err := driver.CurrentSize(path) - response := CurrentSizeResponse{ - Position: position, - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "List": - path, _ := request.Parameters["Path"].(string) - keys, err := driver.List(path) - response := ListResponse{ - Keys: keys, - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case "Move": - sourcePath, _ := request.Parameters["SourcePath"].(string) - destPath, _ := request.Parameters["DestPath"].(string) - err := driver.Move(sourcePath, destPath) - response := MoveResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - case 
"Delete": - path, _ := request.Parameters["Path"].(string) - err := driver.Delete(path) - response := DeleteResponse{ - Error: WrapError(err), - } - err = request.ResponseChannel.Send(&response) - if err != nil { - panic(err) - } - default: - panic(request) - } -} diff --git a/docs/storage/driver/rados/rados_test.go b/docs/storage/driver/rados/rados_test.go index d408519b..ce367fb5 100644 --- a/docs/storage/driver/rados/rados_test.go +++ b/docs/storage/driver/rados/rados_test.go @@ -36,5 +36,5 @@ func init() { return "" } - testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) + testsuites.RegisterSuite(driverConstructor, skipCheck) } diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go index c608e454..70172a6d 100644 --- a/docs/storage/driver/s3/s3_test.go +++ b/docs/storage/driver/s3/s3_test.go @@ -17,7 +17,8 @@ import ( // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } -type S3DriverConstructor func(rootDirectory string) (*Driver, error) +var s3DriverConstructor func(rootDirectory string) (*Driver, error) +var skipS3 func() string func init() { accessKey := os.Getenv("AWS_ACCESS_KEY") @@ -33,7 +34,7 @@ func init() { } defer os.Remove(root) - s3DriverConstructor := func(rootDirectory string) (*Driver, error) { + s3DriverConstructor = func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) @@ -74,79 +75,64 @@ func init() { } // Skip S3 storage driver tests if environment variable parameters are not provided - skipCheck := func() string { + skipS3 = func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" } return "" } - driverConstructor := func() (storagedriver.StorageDriver, error) { + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return s3DriverConstructor(root) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) } - testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) - - // s3Constructor := func() (*Driver, error) { - // return s3DriverConstructor(aws.GetRegion(region)) - // } - - RegisterS3DriverSuite(s3DriverConstructor, skipCheck) - - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // "accesskey": accessKey, - // "secretkey": secretKey, - // "region": region.Name, - // "bucket": bucket, - // "encrypt": encrypt, - // }, skipCheck) - // } -} - -func RegisterS3DriverSuite(s3DriverConstructor S3DriverConstructor, skipCheck testsuites.SkipCheck) { - check.Suite(&S3DriverSuite{ - Constructor: s3DriverConstructor, - SkipCheck: skipCheck, - }) -} - -type S3DriverSuite struct { - Constructor S3DriverConstructor - testsuites.SkipCheck -} - -func (suite *S3DriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } -} - -func (suite *S3DriverSuite) TestEmptyRootList(c *check.C) { validRoot, err := ioutil.TempDir("", "driver-") - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } defer os.Remove(validRoot) - rootedDriver, err := suite.Constructor(validRoot) - c.Assert(err, check.IsNil) - emptyRootDriver, err := suite.Constructor("") - c.Assert(err, check.IsNil) - slashRootDriver, err := suite.Constructor("/") - c.Assert(err, check.IsNil) + rootedDriver, err := 
s3DriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } filename := "/test" contents := []byte("contents") ctx := context.Background() err = rootedDriver.PutContent(ctx, filename, contents) - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } defer rootedDriver.Delete(ctx, filename) keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } } diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 9185ebbc..96231480 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -22,9 +22,9 @@ import ( // Test hooks up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } -// RegisterInProcessSuite registers an in-process storage driver test suite with +// RegisterSuite registers an in-process storage driver test suite with // the go test runner. -func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { +func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { check.Suite(&DriverSuite{ Constructor: driverConstructor, SkipCheck: skipCheck, @@ -32,39 +32,6 @@ func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipC }) } -// RegisterIPCSuite registers a storage driver test suite which runs the named -// driver as a child process with the given parameters. -func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) { - panic("ipc testing is disabled for now") - - // NOTE(stevvooe): IPC testing is disabled for now. Uncomment the code - // block before and remove the panic when we phase it back in. - - // suite := &DriverSuite{ - // Constructor: func() (storagedriver.StorageDriver, error) { - // d, err := ipc.NewDriverClient(driverName, ipcParams) - // if err != nil { - // return nil, err - // } - // err = d.Start() - // if err != nil { - // return nil, err - // } - // return d, nil - // }, - // SkipCheck: skipCheck, - // } - // suite.Teardown = func() error { - // if suite.StorageDriver == nil { - // return nil - // } - - // driverClient := suite.StorageDriver.(*ipc.StorageDriverClient) - // return driverClient.Stop() - // } - // check.Suite(suite) -} - // SkipCheck is a function used to determine if a test suite should be skipped. // If a SkipCheck returns a non-empty skip reason, the suite is skipped with // the given reason. @@ -82,9 +49,8 @@ type DriverConstructor func() (storagedriver.StorageDriver, error) type DriverTeardown func() error // DriverSuite is a gocheck test suite designed to test a -// storagedriver.StorageDriver. 
-// The intended way to create a DriverSuite is with RegisterInProcessSuite or -// RegisterIPCSuite. +// storagedriver.StorageDriver. The intended way to create a DriverSuite is +// with RegisterSuite. type DriverSuite struct { Constructor DriverConstructor Teardown DriverTeardown @@ -841,10 +807,6 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { // TestConcurrentFileStreams checks that multiple *os.File objects can be passed // in to WriteStream concurrently without hanging. func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { - // if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC { - // c.Skip("Need to fix out-of-process concurrency") - // } - numStreams := 32 if testing.Short() { From fa17f9254f4be558dde6cc8107e07334317fc817 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 30 Jun 2015 10:28:14 -0700 Subject: [PATCH 170/501] Pass correct context into tracer Signed-off-by: Richard Scothern --- docs/storage/driver/base/base.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index ae28b187..60af06b8 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -64,7 +64,7 @@ func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent wraps PutContent of underlying storage driver. func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { - ctx, done := context.WithTrace(context.Background()) + ctx, done := context.WithTrace(ctx) defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { @@ -76,7 +76,7 @@ func (base *Base) PutContent(ctx context.Context, path string, content []byte) e // ReadStream wraps ReadStream of underlying storage driver. func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - ctx, done := context.WithTrace(context.Background()) + ctx, done := context.WithTrace(ctx) defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) if offset < 0 { From 855ecb8440bd1c8e52198a8d9e50d5bbbcc7c507 Mon Sep 17 00:00:00 2001 From: Jon Poler Date: Mon, 22 Jun 2015 20:59:28 -0700 Subject: [PATCH 171/501] Remove timeout for http.Client in registry/client/repository.go. Timeouts should not be a discrete period of time, because they end up being arbitrary and may be difficult to gauge correctly against very large Docker layers. Rather, timeouts should be set at the transport level using the SetDeadline attribute on a net.Conn object. Signed-off-by: Jon Poler --- docs/client/repository.go | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 807ce9a6..4a66f70b 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -34,7 +34,6 @@ func NewRepository(ctx context.Context, name, baseURL string, transport http.Rou client := &http.Client{ Transport: transport, - Timeout: 1 * time.Minute, // TODO(dmcgowan): create cookie jar } From b66ee14e624e57ff7810938568bc21917c05793d Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 21 May 2015 11:14:46 -0700 Subject: [PATCH 172/501] Refactor client auth Move client auth into a separate package. Separate ping from the authorizer and export Challenges type. 
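After this refactor the flow is explicit: ping the endpoint once, keep the challenges, and hand them to the authorizer. A rough usage sketch against a placeholder registry URL, assuming the package lands at github.com/docker/distribution/registry/client/auth; the Ping and NewAuthorizer signatures shown are the ones this patch introduces, and later patches in this series rework them again:

    package main

    import (
        "log"
        "net/http"

        "github.com/docker/distribution/registry/client/auth"
        "github.com/docker/distribution/registry/client/transport"
    )

    func main() {
        endpoint := "https://registry.example.com/v2/" // placeholder

        challenges, versions, err := auth.Ping(http.DefaultClient, endpoint, "Docker-Distribution-API-Version")
        if err != nil {
            log.Fatal(err)
        }
        log.Println("advertised versions:", versions)

        // The authorizer consults this map rather than pinging per request.
        challengeMap := map[string][]auth.Challenge{endpoint: challenges}
        handler := auth.NewTokenHandler(nil, nil, "library/ubuntu", "pull")
        authed := &http.Client{
            Transport: transport.NewTransport(nil, auth.NewAuthorizer(challengeMap, handler)),
        }
        _ = authed // use authed for subsequent /v2/ requests
    }
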
Signed-off-by: Derek McGowan (github: dmcgowan) --- .../{transport => auth}/authchallenge.go | 52 +++++++-- .../{transport => auth}/authchallenge_test.go | 4 +- docs/client/{transport => auth}/session.go | 102 +++++------------- .../{transport => auth}/session_test.go | 55 ++++++---- 4 files changed, 109 insertions(+), 104 deletions(-) rename docs/client/{transport => auth}/authchallenge.go (67%) rename docs/client/{transport => auth}/authchallenge_test.go (95%) rename docs/client/{transport => auth}/session.go (69%) rename docs/client/{transport => auth}/session_test.go (84%) diff --git a/docs/client/transport/authchallenge.go b/docs/client/auth/authchallenge.go similarity index 67% rename from docs/client/transport/authchallenge.go rename to docs/client/auth/authchallenge.go index fffd560b..e3abfb11 100644 --- a/docs/client/transport/authchallenge.go +++ b/docs/client/auth/authchallenge.go @@ -1,6 +1,7 @@ -package transport +package auth import ( + "fmt" "net/http" "strings" ) @@ -8,10 +9,13 @@ import ( // Octet types from RFC 2616. type octetType byte -// authorizationChallenge carries information -// from a WWW-Authenticate response header. -type authorizationChallenge struct { - Scheme string +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 Parameters map[string]string } @@ -54,12 +58,44 @@ func init() { } } -func parseAuthHeader(header http.Header) map[string]authorizationChallenge { - challenges := map[string]authorizationChallenge{} +// Ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. +func Ping(client *http.Client, endpoint, versionHeader string) ([]Challenge, []string, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, nil, err + } + + resp, err := client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + versions := []string{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + versions = append(versions, strings.Fields(supportedVersions)...) + } + } + + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. 
+ return parseAuthHeader(resp.Header), versions, nil + } else if resp.StatusCode != http.StatusOK { + return nil, versions, fmt.Errorf("unable to get valid ping response: %d", resp.StatusCode) + } + + return nil, versions, nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { - challenges[v] = authorizationChallenge{Scheme: v, Parameters: p} + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) } } return challenges diff --git a/docs/client/transport/authchallenge_test.go b/docs/client/auth/authchallenge_test.go similarity index 95% rename from docs/client/transport/authchallenge_test.go rename to docs/client/auth/authchallenge_test.go index 45c932b9..9b6a5adc 100644 --- a/docs/client/transport/authchallenge_test.go +++ b/docs/client/auth/authchallenge_test.go @@ -1,4 +1,4 @@ -package transport +package auth import ( "net/http" @@ -13,7 +13,7 @@ func TestAuthChallengeParse(t *testing.T) { if len(challenges) != 1 { t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) } - challenge := challenges["bearer"] + challenge := challenges[0] if expected := "bearer"; challenge.Scheme != expected { t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) diff --git a/docs/client/transport/session.go b/docs/client/auth/session.go similarity index 69% rename from docs/client/transport/session.go rename to docs/client/auth/session.go index 90c8082c..5512a9a1 100644 --- a/docs/client/transport/session.go +++ b/docs/client/auth/session.go @@ -1,4 +1,4 @@ -package transport +package auth import ( "encoding/json" @@ -9,6 +9,8 @@ import ( "strings" "sync" "time" + + "github.com/docker/distribution/registry/client/transport" ) // AuthenticationHandler is an interface for authorizing a request from @@ -32,71 +34,24 @@ type CredentialStore interface { // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. -func NewAuthorizer(transport http.RoundTripper, handlers ...AuthenticationHandler) RequestModifier { - return &tokenAuthorizer{ - challenges: map[string]map[string]authorizationChallenge{}, +// methods should be first. The challengeMap holds a list of challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). 
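Returning []Challenge rather than a map keyed by scheme preserves header order, which matters once an endpoint can offer several schemes. A hypothetical in-package test, not part of the patch, showing what parseAuthHeader yields for a typical bearer challenge (the scheme comes back lowercased, exactly as the renamed test above expects):

    package auth

    import (
        "net/http"
        "testing"
    )

    // TestParseBearerChallenge is illustrative only.
    func TestParseBearerChallenge(t *testing.T) {
        h := http.Header{}
        h.Add("WWW-Authenticate",
            `Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`)

        challenges := parseAuthHeader(h)
        if len(challenges) != 1 || challenges[0].Scheme != "bearer" {
            t.Fatalf("unexpected challenges: %#v", challenges)
        }
        if realm := challenges[0].Parameters["realm"]; realm != "https://auth.docker.io/token" {
            t.Fatalf("unexpected realm: %q", realm)
        }
    }
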
+func NewAuthorizer(challengeMap map[string][]Challenge, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: challengeMap, handlers: handlers, - transport: transport, } } -type tokenAuthorizer struct { - challenges map[string]map[string]authorizationChallenge +type endpointAuthorizer struct { + challenges map[string][]Challenge handlers []AuthenticationHandler transport http.RoundTripper } -func (ta *tokenAuthorizer) ping(endpoint string) (map[string]authorizationChallenge, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: ta.transport, - // Ping should fail fast - Timeout: 5 * time.Second, - } - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - // TODO(dmcgowan): Add version string which would allow skipping this section - var supportsV2 bool -HeaderLoop: - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { - for _, versionName := range strings.Fields(supportedVersions) { - if versionName == "registry/2.0" { - supportsV2 = true - break HeaderLoop - } - } - } - - if !supportsV2 { - return nil, fmt.Errorf("%s does not appear to be a v2 registry endpoint", endpoint) - } - - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - return parseAuthHeader(resp.Header), nil - } else if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unable to get valid ping response: %d", resp.StatusCode) - } - - return nil, nil -} - -func (ta *tokenAuthorizer) ModifyRequest(req *http.Request) error { +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { v2Root := strings.Index(req.URL.Path, "/v2/") - // Test if /v2/ does not exist or not at beginning - // TODO(dmcgowan) support v2 endpoints which have a prefix before /v2/ - if v2Root == -1 || v2Root > 0 { + if v2Root == -1 { return nil } @@ -108,19 +63,16 @@ func (ta *tokenAuthorizer) ModifyRequest(req *http.Request) error { pingEndpoint := ping.String() - challenges, ok := ta.challenges[pingEndpoint] + challenges, ok := ea.challenges[pingEndpoint] if !ok { - var err error - challenges, err = ta.ping(pingEndpoint) - if err != nil { - return err - } - ta.challenges[pingEndpoint] = challenges + return nil } - for _, handler := range ta.handlers { - challenge, ok := challenges[handler.Scheme()] - if ok { + for _, handler := range ea.handlers { + for _, challenge := range challenges { + if challenge.Scheme != handler.Scheme() { + continue + } if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { return err } @@ -133,7 +85,7 @@ func (ta *tokenAuthorizer) ModifyRequest(req *http.Request) error { type tokenHandler struct { header http.Header creds CredentialStore - scope TokenScope + scope tokenScope transport http.RoundTripper tokenLock sync.Mutex @@ -141,25 +93,29 @@ type tokenHandler struct { tokenExpiration time.Time } -// TokenScope represents the scope at which a token will be requested. +// tokenScope represents the scope at which a token will be requested. // This represents a specific action on a registry resource. 
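The scope's serialized form is what ends up in the token request. A standalone sketch with a local copy of the now-unexported scope type, showing the resource:name:actions layout:

    package main

    import (
        "fmt"
        "strings"
    )

    // tokenScope mirrors the unexported type in this patch, for illustration.
    type tokenScope struct {
        Resource string
        Scope    string
        Actions  []string
    }

    func (ts tokenScope) String() string {
        return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ","))
    }

    func main() {
        ts := tokenScope{Resource: "repository", Scope: "library/ubuntu", Actions: []string{"pull", "push"}}
        fmt.Println(ts) // repository:library/ubuntu:pull,push
    }
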
-type TokenScope struct { +type tokenScope struct { Resource string Scope string Actions []string } -func (ts TokenScope) String() string { +func (ts tokenScope) String() string { return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope TokenScope) AuthenticationHandler { +func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { return &tokenHandler{ transport: transport, creds: creds, - scope: scope, + scope: tokenScope{ + Resource: "repository", + Scope: scope, + Actions: actions, + }, } } diff --git a/docs/client/transport/session_test.go b/docs/client/auth/session_test.go similarity index 84% rename from docs/client/transport/session_test.go rename to docs/client/auth/session_test.go index 374d6e79..f16836da 100644 --- a/docs/client/transport/session_test.go +++ b/docs/client/auth/session_test.go @@ -1,4 +1,4 @@ -package transport +package auth import ( "encoding/base64" @@ -8,6 +8,7 @@ import ( "net/url" "testing" + "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/testutil" ) @@ -67,17 +68,6 @@ func TestEndpointAuthorizeToken(t *testing.T) { repo2 := "other/registry" scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) - tokenScope1 := TokenScope{ - Resource: "repository", - Scope: repo1, - Actions: []string{"pull", "push"}, - } - tokenScope2 := TokenScope{ - Resource: "repository", - Scope: repo2, - Actions: []string{"pull", "push"}, - } - tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ @@ -122,7 +112,14 @@ func TestEndpointAuthorizeToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - transport1 := NewTransport(nil, NewAuthorizer(nil, NewTokenHandler(nil, nil, tokenScope1))) + challenges1, _, err := Ping(&http.Client{}, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + challengeMap1 := map[string][]Challenge{ + e + "/v2/": challenges1, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeMap1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) @@ -141,7 +138,14 @@ func TestEndpointAuthorizeToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, badCheck) defer c2() - transport2 := NewTransport(nil, NewAuthorizer(nil, NewTokenHandler(nil, nil, tokenScope2))) + challenges2, _, err := Ping(&http.Client{}, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + challengeMap2 := map[string][]Challenge{ + e + "/v2/": challenges2, + } + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeMap2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) client2 := &http.Client{Transport: transport2} req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) @@ -166,11 +170,6 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { scope := fmt.Sprintf("repository:%s:pull,push", repo) username := "tokenuser" password := "superSecretPa$$word" - tokenScope := TokenScope{ - Resource: "repository", - Scope: repo, - Actions: []string{"pull", "push"}, - } tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { @@ -216,7 +215,14 @@ func TestEndpointAuthorizeTokenBasic(t 
*testing.T) { password: password, } - transport1 := NewTransport(nil, NewAuthorizer(nil, NewTokenHandler(nil, creds, tokenScope), NewBasicHandler(creds))) + challenges, _, err := Ping(&http.Client{}, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + challengeMap := map[string][]Challenge{ + e + "/v2/": challenges, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeMap, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) @@ -256,7 +262,14 @@ func TestEndpointAuthorizeBasic(t *testing.T) { password: password, } - transport1 := NewTransport(nil, NewAuthorizer(nil, NewBasicHandler(creds))) + challenges, _, err := Ping(&http.Client{}, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + challengeMap := map[string][]Challenge{ + e + "/v2/": challenges, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeMap, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) From 5a3a9c6a77f04c9f2358f7f3f2e351760ad6f1bd Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 15 Jun 2015 16:10:48 -0700 Subject: [PATCH 173/501] Separate version and challenge parsing from ping Replace ping logic with individual functions to extract API version and authorization challenges. The response from a ping operation can be passed into these function. If an error occurs in parsing, the version or challenge will not be used. Sending the ping request is the responsibility of the caller. APIVersion has been converted from a string to a structure type. A parse function was added to convert from string to the structure type. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/api_version.go | 58 +++++++++++++++++++++++++++++++ docs/client/auth/authchallenge.go | 38 +++++--------------- docs/client/auth/session_test.go | 43 +++++++++++++++++++---- 3 files changed, 104 insertions(+), 35 deletions(-) create mode 100644 docs/client/auth/api_version.go diff --git a/docs/client/auth/api_version.go b/docs/client/auth/api_version.go new file mode 100644 index 00000000..df095474 --- /dev/null +++ b/docs/client/auth/api_version.go @@ -0,0 +1,58 @@ +package auth + +import ( + "net/http" + "strings" +) + +// APIVersion represents a version of an API including its +// type and version number. +type APIVersion struct { + // Type refers to the name of a specific API specification + // such as "registry" + Type string + + // Version is the vesion of the API specification implemented, + // This may omit the revision number and only include + // the major and minor version, such as "2.0" + Version string +} + +// String returns the string formatted API Version +func (v APIVersion) String() string { + return v.Type + "/" + v.Version +} + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. 
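A quick sketch of how the parser (ParseAPIVersion, defined just below) splits version strings, assuming the package's import path is github.com/docker/distribution/registry/client/auth:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/client/auth"
    )

    func main() {
        for _, s := range []string{"registry/2.0", "trust/1.0", "2.0"} {
            v := auth.ParseAPIVersion(s)
            fmt.Printf("%q -> Type=%q Version=%q\n", s, v.Type, v.Version)
        }
        // "registry/2.0" -> Type="registry" Version="2.0"
        // "trust/1.0"    -> Type="trust" Version="1.0"
        // "2.0"          -> Type="unknown" Version="2.0" (no '/', so the type is unknown)
    }
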
+func APIVersions(resp *http.Response, versionHeader string) []APIVersion { + versions := []APIVersion{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, ParseAPIVersion(version)) + } + } + } + return versions +} + +// ParseAPIVersion parses an API version string into an APIVersion +// Format (Expected, not enforced): +// API version string = '/' +// API type = [a-z][a-z0-9]* +// API version = [0-9]+(\.[0-9]+)? +// TODO(dmcgowan): Enforce format, add error condition, remove unknown type +func ParseAPIVersion(versionStr string) APIVersion { + idx := strings.IndexRune(versionStr, '/') + if idx == -1 { + return APIVersion{ + Type: "unknown", + Version: versionStr, + } + } + return APIVersion{ + Type: strings.ToLower(versionStr[:idx]), + Version: versionStr[idx+1:], + } +} diff --git a/docs/client/auth/authchallenge.go b/docs/client/auth/authchallenge.go index e3abfb11..5d371646 100644 --- a/docs/client/auth/authchallenge.go +++ b/docs/client/auth/authchallenge.go @@ -1,14 +1,10 @@ package auth import ( - "fmt" "net/http" "strings" ) -// Octet types from RFC 2616. -type octetType byte - // Challenge carries information from a WWW-Authenticate response header. // See RFC 2617. type Challenge struct { @@ -19,6 +15,9 @@ type Challenge struct { Parameters map[string]string } +// Octet types from RFC 2616. +type octetType byte + var octetTypes [256]octetType const ( @@ -58,36 +57,17 @@ func init() { } } -// Ping pings the provided endpoint to determine its required authorization challenges. -// If a version header is provided, the versions will be returned. -func Ping(client *http.Client, endpoint, versionHeader string) ([]Challenge, []string, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, nil, err - } - - resp, err := client.Do(req) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - versions := []string{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - versions = append(versions, strings.Fields(supportedVersions)...) - } - } - +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { if resp.StatusCode == http.StatusUnauthorized { // Parse the WWW-Authenticate Header and store the challenges // on this endpoint object. 
- return parseAuthHeader(resp.Header), versions, nil - } else if resp.StatusCode != http.StatusOK { - return nil, versions, fmt.Errorf("unable to get valid ping response: %d", resp.StatusCode) + return parseAuthHeader(resp.Header) } - return nil, versions, nil + return nil } func parseAuthHeader(header http.Header) []Challenge { diff --git a/docs/client/auth/session_test.go b/docs/client/auth/session_test.go index f16836da..3d19d4a7 100644 --- a/docs/client/auth/session_test.go +++ b/docs/client/auth/session_test.go @@ -42,8 +42,9 @@ func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, au wrapper := &testAuthenticationWrapper{ headers: http.Header(map[string][]string{ - "Docker-Distribution-API-Version": {"registry/2.0"}, - "WWW-Authenticate": {authenticate}, + "X-API-Version": {"registry/2.0"}, + "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, + "WWW-Authenticate": {authenticate}, }), authCheck: authCheck, next: h, @@ -53,6 +54,18 @@ func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, au return s.URL, s.Close } +// ping pings the provided endpoint to determine its required authorization challenges. +// If a version header is provided, the versions will be returned. +func ping(endpoint, versionHeader string) ([]Challenge, []APIVersion, error) { + resp, err := http.Get(endpoint) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + return ResponseChallenges(resp), APIVersions(resp, versionHeader), err +} + type testCredentialStore struct { username string password string @@ -112,10 +125,16 @@ func TestEndpointAuthorizeToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - challenges1, _, err := Ping(&http.Client{}, e+"/v2/", "") + challenges1, versions, err := ping(e+"/v2/", "x-api-version") if err != nil { t.Fatal(err) } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } challengeMap1 := map[string][]Challenge{ e + "/v2/": challenges1, } @@ -138,10 +157,22 @@ func TestEndpointAuthorizeToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, badCheck) defer c2() - challenges2, _, err := Ping(&http.Client{}, e+"/v2/", "") + challenges2, versions, err := ping(e+"/v2/", "x-multi-api-version") if err != nil { t.Fatal(err) } + if len(versions) != 3 { + t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) + } + if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) + } challengeMap2 := map[string][]Challenge{ e + "/v2/": challenges2, } @@ -215,7 +246,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { password: password, } - challenges, _, err := Ping(&http.Client{}, e+"/v2/", "") + challenges, _, err := ping(e+"/v2/", "") if err != nil { t.Fatal(err) } @@ -262,7 +293,7 @@ func TestEndpointAuthorizeBasic(t *testing.T) { password: password, } - challenges, _, err := Ping(&http.Client{}, e+"/v2/", "") + 
challenges, _, err := ping(e+"/v2/", "")
 if err != nil {
 t.Fatal(err)
 }

From 376cc5fe756175bc1efbd1aeb99d53cfa24252ba Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Tue, 30 Jun 2015 10:56:29 -0700
Subject: [PATCH 174/501] Add challenge manager interface

The challenge manager interface is used to handle getting authorization
challenges from an endpoint as well as extracting challenges from
responses.

Signed-off-by: Derek McGowan  (github: dmcgowan)
---
 docs/client/auth/authchallenge.go | 53 +++++++++++++++++++++++++++++++
 docs/client/auth/session.go | 28 ++++++++--------
 docs/client/auth/session_test.go | 42 +++++++++++-------------
 3 files changed, 87 insertions(+), 36 deletions(-)

diff --git a/docs/client/auth/authchallenge.go b/docs/client/auth/authchallenge.go
index 5d371646..a6ad45d8 100644
--- a/docs/client/auth/authchallenge.go
+++ b/docs/client/auth/authchallenge.go
@@ -1,7 +1,9 @@
 package auth

 import (
+ "fmt"
 "net/http"
+ "net/url"
 "strings"
 )

@@ -15,6 +17,57 @@ type Challenge struct {
 Parameters map[string]string
 }

+// ChallengeManager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type ChallengeManager interface {
+ // GetChallenges returns the challenges for the given
+ // endpoint URL.
+ GetChallenges(endpoint string) ([]Challenge, error)
+
+ // AddResponse adds the response to the challenge
+ // manager. The challenges will be parsed out of
+ // the WWW-Authenticate headers and added to the
+ // URL which produced the response. If the
+ // response was authorized, any challenges for the
+ // endpoint will be cleared.
+ AddResponse(resp *http.Response) error
+}
+
+// NewSimpleChallengeManager returns an instance of
+// ChallengeManager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleChallengeManager() ChallengeManager {
+ return simpleChallengeManager{}
+}
+
+type simpleChallengeManager map[string][]Challenge
+
+func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) {
+ challenges := m[endpoint]
+ return challenges, nil
+}
+
+func (m simpleChallengeManager) AddResponse(resp *http.Response) error {
+ challenges := ResponseChallenges(resp)
+ if resp.Request == nil {
+ return fmt.Errorf("missing request reference")
+ }
+ urlCopy := url.URL{
+ Path: resp.Request.URL.Path,
+ Host: resp.Request.URL.Host,
+ Scheme: resp.Request.URL.Scheme,
+ }
+ m[urlCopy.String()] = challenges
+
+ return nil
+}
+
 // Octet types from RFC 2616.
 type octetType byte

diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go
index 5512a9a1..27e1d9e3 100644
--- a/docs/client/auth/session.go
+++ b/docs/client/auth/session.go
@@ -36,15 +36,15 @@ type CredentialStore interface {
 // schemes. The handlers are tried in order, the higher priority authentication
 // methods should be first. The challengeMap holds a list of challenges for
 // a given root API endpoint (for example "https://registry-1.docker.io/v2/").
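Priming and querying the new SimpleChallengeManager looks roughly like this, with a placeholder registry URL; AddResponse keys the challenges by the scheme, host, and path of the request that produced the response, so the lookup URL must match:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/docker/distribution/registry/client/auth"
    )

    func main() {
        manager := auth.NewSimpleChallengeManager()

        // Prime the manager with a response from the registry's root endpoint.
        resp, err := http.Get("https://registry.example.com/v2/") // placeholder
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        if err := manager.AddResponse(resp); err != nil {
            panic(err)
        }

        // Later, the authorizer asks the manager instead of pinging again.
        challenges, err := manager.GetChallenges("https://registry.example.com/v2/")
        if err != nil {
            panic(err)
        }
        for _, c := range challenges {
            fmt.Println(c.Scheme, c.Parameters)
        }
    }
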
-func NewAuthorizer(challengeMap map[string][]Challenge, handlers ...AuthenticationHandler) transport.RequestModifier { +func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { return &endpointAuthorizer{ - challenges: challengeMap, + challenges: manager, handlers: handlers, } } type endpointAuthorizer struct { - challenges map[string][]Challenge + challenges ChallengeManager handlers []AuthenticationHandler transport http.RoundTripper } @@ -63,18 +63,20 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { pingEndpoint := ping.String() - challenges, ok := ea.challenges[pingEndpoint] - if !ok { - return nil + challenges, err := ea.challenges.GetChallenges(pingEndpoint) + if err != nil { + return err } - for _, handler := range ea.handlers { - for _, challenge := range challenges { - if challenge.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { - return err + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, challenge := range challenges { + if challenge.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { + return err + } } } } diff --git a/docs/client/auth/session_test.go b/docs/client/auth/session_test.go index 3d19d4a7..1b4754ab 100644 --- a/docs/client/auth/session_test.go +++ b/docs/client/auth/session_test.go @@ -56,14 +56,18 @@ func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, au // ping pings the provided endpoint to determine its required authorization challenges. // If a version header is provided, the versions will be returned. -func ping(endpoint, versionHeader string) ([]Challenge, []APIVersion, error) { +func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { resp, err := http.Get(endpoint) if err != nil { - return nil, nil, err + return nil, err } defer resp.Body.Close() - return ResponseChallenges(resp), APIVersions(resp, versionHeader), err + if err := manager.AddResponse(resp); err != nil { + return nil, err + } + + return APIVersions(resp, versionHeader), err } type testCredentialStore struct { @@ -125,7 +129,8 @@ func TestEndpointAuthorizeToken(t *testing.T) { e, c := testServerWithAuth(m, authenicate, validCheck) defer c() - challenges1, versions, err := ping(e+"/v2/", "x-api-version") + challengeManager1 := NewSimpleChallengeManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") if err != nil { t.Fatal(err) } @@ -135,10 +140,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) } - challengeMap1 := map[string][]Challenge{ - e + "/v2/": challenges1, - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeMap1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) @@ -157,7 +159,8 @@ func TestEndpointAuthorizeToken(t *testing.T) { e2, c2 := testServerWithAuth(m, authenicate, badCheck) defer c2() - challenges2, versions, err := ping(e+"/v2/", "x-multi-api-version") + challengeManager2 := NewSimpleChallengeManager() + versions, err = ping(challengeManager2, 
e+"/v2/", "x-multi-api-version") if err != nil { t.Fatal(err) } @@ -173,10 +176,7 @@ func TestEndpointAuthorizeToken(t *testing.T) { if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) } - challengeMap2 := map[string][]Challenge{ - e + "/v2/": challenges2, - } - transport2 := transport.NewTransport(nil, NewAuthorizer(challengeMap2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", "push"))) client2 := &http.Client{Transport: transport2} req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) @@ -246,14 +246,12 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { password: password, } - challenges, _, err := ping(e+"/v2/", "") + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) } - challengeMap := map[string][]Challenge{ - e + "/v2/": challenges, - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeMap, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) @@ -293,14 +291,12 @@ func TestEndpointAuthorizeBasic(t *testing.T) { password: password, } - challenges, _, err := ping(e+"/v2/", "") + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) } - challengeMap := map[string][]Challenge{ - e + "/v2/": challenges, - } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeMap, NewBasicHandler(creds))) + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) From 970efb6ba7550d5abab99f4d9d7541daba67ca0a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 8 Jul 2015 11:02:47 -0700 Subject: [PATCH 175/501] Fix typo in Version doc Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/api_version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/client/auth/api_version.go b/docs/client/auth/api_version.go index df095474..7d8f1d95 100644 --- a/docs/client/auth/api_version.go +++ b/docs/client/auth/api_version.go @@ -12,7 +12,7 @@ type APIVersion struct { // such as "registry" Type string - // Version is the vesion of the API specification implemented, + // Version is the version of the API specification implemented, // This may omit the revision number and only include // the major and minor version, such as "2.0" Version string From a58848a0b7230492127240caa23a278f0004b835 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 10 Jul 2015 12:00:06 -0600 Subject: [PATCH 176/501] Allow single character repository names The main goal of this changeset is to allow repository name components to consist of a single character. The number of components allowed and the slash separation requirements have also been clarified. To go along with this simplification, errant constants and unneeded error types have been removed. 
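The practical effect is easiest to see against the validator itself. A sketch using inputs from the test table below, assuming the v2 package's import path is github.com/docker/distribution/registry/api/v2:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/registry/api/v2"
    )

    func main() {
        for _, name := range []string{"a", "a/aa", "a/a/a/b/b", "", "a//a/a", "a-/a/a/a"} {
            err := v2.ValidateRepositoryName(name)
            fmt.Printf("%q -> %v\n", name, err)
        }
        // "a", "a/aa" and "a/a/a/b/b" now validate (err is nil); "" fails with
        // ErrRepositoryNameEmpty, and "a//a/a", "a-/a/a/a" remain invalid.
    }
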
Signed-off-by: Stephen J Day --- docs/api/v2/names.go | 39 +++++++++++--------------------------- docs/api/v2/names_test.go | 31 ++++++++++++++++++++++-------- docs/api/v2/routes_test.go | 3 +++ 3 files changed, 37 insertions(+), 36 deletions(-) diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go index 19cb72a0..14b7ea60 100644 --- a/docs/api/v2/names.go +++ b/docs/api/v2/names.go @@ -6,19 +6,10 @@ import ( "strings" ) -// TODO(stevvooe): Move these definitions back to an exported package. While -// they are used with v2 definitions, their relevance expands beyond. -// "distribution/names" is a candidate package. +// TODO(stevvooe): Move these definitions to the future "reference" package. +// While they are used with v2 definitions, their relevance expands beyond. const ( - // RepositoryNameComponentMinLength is the minimum number of characters in a - // single repository name slash-delimited component - RepositoryNameComponentMinLength = 2 - - // RepositoryNameMinComponents is the minimum number of slash-delimited - // components that a repository name must have - RepositoryNameMinComponents = 1 - // RepositoryNameTotalLengthMax is the maximum total number of characters in // a repository name RepositoryNameTotalLengthMax = 255 @@ -40,17 +31,13 @@ var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentReg // TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) -// TODO(stevvooe): Contribute these exports back to core, so they are shared. +// TagNameAnchoredRegexp matches valid tag names, anchored at the start and +// end of the matched string. +var TagNameAnchoredRegexp = regexp.MustCompile("^" + TagNameRegexp.String() + "$") var ( - // ErrRepositoryNameComponentShort is returned when a repository name - // contains a component which is shorter than - // RepositoryNameComponentMinLength - ErrRepositoryNameComponentShort = fmt.Errorf("repository name component must be %v or more characters", RepositoryNameComponentMinLength) - - // ErrRepositoryNameMissingComponents is returned when a repository name - // contains fewer than RepositoryNameMinComponents components - ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents) + // ErrRepositoryNameEmpty is returned for empty, invalid repository names. + ErrRepositoryNameEmpty = fmt.Errorf("repository name must have at least one component") // ErrRepositoryNameLong is returned when a repository name is longer than // RepositoryNameTotalLengthMax @@ -76,21 +63,17 @@ var ( // The result of the production, known as the "namespace", should be limited // to 255 characters. 
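The new anchored tag expression exists because the unanchored one happily matches a valid substring inside an invalid tag. A standalone sketch with local copies of the two expressions:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Local copies of TagNameRegexp and its anchored variant, for illustration.
    var (
        tagName         = regexp.MustCompile(`[\w][\w.-]{0,127}`)
        tagNameAnchored = regexp.MustCompile(`^` + `[\w][\w.-]{0,127}` + `$`)
    )

    func main() {
        // The unanchored form finds "v1.0" inside the invalid tag "-v1.0"...
        fmt.Println(tagName.MatchString("-v1.0")) // true
        // ...while the anchored form rejects the string as a whole.
        fmt.Println(tagNameAnchored.MatchString("-v1.0")) // false
        fmt.Println(tagNameAnchored.MatchString("v1.0"))  // true
    }
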
func ValidateRepositoryName(name string) error { + if name == "" { + return ErrRepositoryNameEmpty + } + if len(name) > RepositoryNameTotalLengthMax { return ErrRepositoryNameLong } components := strings.Split(name, "/") - if len(components) < RepositoryNameMinComponents { - return ErrRepositoryNameMissingComponents - } - for _, component := range components { - if len(component) < RepositoryNameComponentMinLength { - return ErrRepositoryNameComponentShort - } - if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { return ErrRepositoryNameComponentInvalid } diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index 0975fb7c..51e0ba8b 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -1,6 +1,7 @@ package v2 import ( + "strconv" "strings" "testing" ) @@ -10,6 +11,10 @@ func TestRepositoryNameRegexp(t *testing.T) { input string err error }{ + { + input: "", + err: ErrRepositoryNameEmpty, + }, { input: "short", }, @@ -30,11 +35,26 @@ func TestRepositoryNameRegexp(t *testing.T) { }, { input: "a/a/a/b/b", - err: ErrRepositoryNameComponentShort, }, { input: "a/a/a/a/", - err: ErrRepositoryNameComponentShort, + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "a//a/a", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "a", + }, + { + input: "a/aa", + }, + { + input: "aa/a", + }, + { + input: "a/aa/a", }, { input: "foo.com/bar/baz", @@ -58,10 +78,6 @@ func TestRepositoryNameRegexp(t *testing.T) { { input: "a-a/a-a", }, - { - input: "a", - err: ErrRepositoryNameComponentShort, - }, { input: "a-/a/a/a", err: ErrRepositoryNameComponentInvalid, @@ -110,9 +126,8 @@ func TestRepositoryNameRegexp(t *testing.T) { err: ErrRepositoryNameComponentInvalid, }, } { - failf := func(format string, v ...interface{}) { - t.Logf(testcase.input+": "+format, v...) + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() } diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index fb268336..9fd29a4f 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -263,6 +263,7 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee } if testcase.StatusCode != http.StatusOK { + resp.Body.Close() // We don't care about json response. continue } @@ -291,6 +292,8 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } + + resp.Body.Close() } } From 6f2f84996d239531cd32a4bebbea208153466a20 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 14 Jul 2015 11:14:09 -0700 Subject: [PATCH 177/501] Fix build when using build tag 'noresumabledigest' Signed-off-by: Richard Scothern --- docs/storage/blobwriter.go | 33 ++++++++++++++++++++++++++++ docs/storage/blobwriter_resumable.go | 32 --------------------------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 6a37e81d..4189d517 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "path" "time" "github.com/Sirupsen/logrus" @@ -311,3 +312,35 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor return bw.blobStore.driver.Move(ctx, bw.path, blobPath) } + +// removeResources should clean up all resources associated with the upload +// instance. 
An error will be returned if the clean up cannot proceed. If the
+// resources are already not present, no error will be returned.
+func (bw *blobWriter) removeResources(ctx context.Context) error {
+ dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{
+ name: bw.blobStore.repository.Name(),
+ id: bw.id,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // Resolve and delete the containing directory, which should include any
+ // upload related files.
+ dirPath := path.Dir(dataPath)
+ if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
+ switch err := err.(type) {
+ case storagedriver.PathNotFoundError:
+ break // already gone!
+ default:
+ // This should be uncommon enough such that returning an error
+ // should be okay. At this point, the upload should be mostly
+ // complete, but perhaps the backend became unaccessible.
+ context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go
index af847888..c2ab2123 100644
--- a/docs/storage/blobwriter_resumable.go
+++ b/docs/storage/blobwriter_resumable.go
@@ -100,38 +100,6 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
 return nil
 }

-// removeResources should clean up all resources associated with the upload
-// instance. An error will be returned if the clean up cannot proceed. If the
-// resources are already not present, no error will be returned.
-func (bw *blobWriter) removeResources(ctx context.Context) error {
- dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{
- name: bw.blobStore.repository.Name(),
- id: bw.id,
- })
-
- if err != nil {
- return err
- }
-
- // Resolve and delete the containing directory, which should include any
- // upload related files.
- dirPath := path.Dir(dataPath)
- if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
- switch err := err.(type) {
- case storagedriver.PathNotFoundError:
- break // already gone!
- default:
- // This should be uncommon enough such that returning an error
- // should be okay. At this point, the upload should be mostly
- // complete, but perhaps the backend became unaccessible.
- context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
- return err
- }
- }
-
- return nil
-}
-
 type hashStateEntry struct {
 offset int64
 path string

From caf989a5723711f66bf8d797f7f08425374763ab Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Tue, 14 Jul 2015 16:25:37 -0700
Subject: [PATCH 178/501] Allow conditional fetching of manifests with the
 registry client.

Add a functional argument to pass a digest to (ManifestService).GetByTag().
If the digest matches, an empty manifest and a nil error are returned.

See 6bedf7d1cd00223b0f3e81eabf78dbd2148382a7 for server implementation.
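Client-side usage then looks roughly like this, with a placeholder registry URL and digest; NewRepository, Manifests, and AddEtagToTag are used as this patch defines them, and the context package is assumed to be github.com/docker/distribution/context:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/docker/distribution/context"
        "github.com/docker/distribution/registry/client"
    )

    func main() {
        ctx := context.Background()
        repo, err := client.NewRepository(ctx, "library/ubuntu", "https://registry.example.com", http.DefaultTransport)
        if err != nil {
            panic(err)
        }

        ms := repo.Manifests()
        cached := "sha256:..." // digest of the manifest we already hold (placeholder)

        sm, err := ms.GetByTag("latest", client.AddEtagToTag("latest", cached))
        if err != nil {
            panic(err)
        }
        if sm == nil {
            fmt.Println("not modified; reuse the cached manifest")
            return
        }
        fmt.Println("tag moved; fetched a fresh manifest")
    }
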
Signed-off-by: Richard Scothern --- docs/client/repository.go | 36 ++++++++++++++++++++-- docs/client/repository_test.go | 56 +++++++++++++++++++++++++--------- docs/storage/manifeststore.go | 9 +++++- 3 files changed, 83 insertions(+), 18 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 4a66f70b..1f360ec8 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -75,6 +75,7 @@ func (r *repository) Manifests() distribution.ManifestService { name: r.Name(), ub: r.ub, client: r.client, + etags: make(map[string]string), } } @@ -104,6 +105,7 @@ type manifests struct { name string ub *v2.URLBuilder client *http.Client + etags map[string]string } func (ms *manifests) Tags() ([]string, error) { @@ -173,13 +175,40 @@ func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { return ms.GetByTag(dgst.String()) } -func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) { +// AddEtagToTag allows a client to supply an eTag to GetByTag which will +// be used for a conditional HTTP request. If the eTag matches, a nil +// manifest and nil error will be returned. +func AddEtagToTag(tagName, dgst string) distribution.ManifestServiceOption { + return func(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[tagName] = dgst + return nil + } + return fmt.Errorf("etag options is a client-only option") + } +} + +func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + u, err := ms.ub.BuildManifestURL(ms.name, tag) if err != nil { return nil, err } + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } - resp, err := ms.client.Get(u) + if _, ok := ms.etags[tag]; ok { + req.Header.Set("eTag", ms.etags[tag]) + } + resp, err := ms.client.Do(req) if err != nil { return nil, err } @@ -193,8 +222,9 @@ func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) { if err := decoder.Decode(&sm); err != nil { return nil, err } - return &sm, nil + case http.StatusNotModified: + return nil, nil default: return nil, handleErrorResponse(resp) } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 7dbe97cf..26d92d8e 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -46,6 +46,7 @@ func newRandomBlob(size int) (digest.Digest, []byte) { } func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", @@ -60,6 +61,7 @@ func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.R }), }, }) + *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", @@ -398,6 +400,40 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*manifest.Signe return m, dgst } +func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest, _ := digest.FromBytes(content) + getReqWithEtag := testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/manifests/" + reference, + Headers: http.Header(map[string][]string{ + "Etag": {dgst}, + }), + } + + var getRespWithEtag testutil.Response + if actualDigest.String() == dgst { + getRespWithEtag = testutil.Response{ + StatusCode: 
http.StatusNotModified, + Body: []byte{}, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + } + } else { + getRespWithEtag = testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + } + + } + *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) +} + func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -487,11 +523,11 @@ func TestManifestFetch(t *testing.T) { } } -func TestManifestFetchByTag(t *testing.T) { +func TestManifestFetchWithEtag(t *testing.T) { repo := "test.example.com/repo/by/tag" - m1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + m1, d1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addTestManifest(repo, "latest", m1.Raw, &m) + addTestManifestWithEtag(repo, "latest", m1.Raw, &m, d1.String()) e, c := testServer(m) defer c() @@ -502,20 +538,12 @@ func TestManifestFetchByTag(t *testing.T) { } ms := r.Manifests() - ok, err := ms.ExistsByTag("latest") + m2, err := ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) if err != nil { t.Fatal(err) } - if !ok { - t.Fatal("Manifest does not exist") - } - - manifest, err := ms.GetByTag("latest") - if err != nil { - t.Fatal(err) - } - if err := checkEqualManifest(manifest, m1); err != nil { - t.Fatal(err) + if m2 != nil { + t.Fatal("Expected empty manifest for matching etag") } } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 07f8de3c..8f6c3562 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -73,7 +73,14 @@ func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { return ms.tagStore.exists(tag) } -func (ms *manifestStore) GetByTag(tag string) (*manifest.SignedManifest, error) { +func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag") dgst, err := ms.tagStore.resolve(tag) if err != nil { From db30d384e059883556813606226ceb30380e1961 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 17 Jun 2015 17:39:27 -0700 Subject: [PATCH 179/501] Add ability to pass in substitution args into an Error Signed-off-by: Doug Davis --- docs/api/errcode/errors.go | 80 +++++++++++++++++++++------------ docs/api/errcode/errors_test.go | 45 ++++++++++++++++++- docs/client/blob_writer_test.go | 8 ++-- docs/client/errors.go | 12 +++-- docs/client/repository_test.go | 6 +-- 5 files changed, 111 insertions(+), 40 deletions(-) diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index a68aaad5..d221cb67 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -69,20 +69,28 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { - if err, ok := detail.(error); ok { - detail = err.Error() - } - return Error{ - Code: ec, - Detail: detail, - } + Code: ec, + 
Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) } // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { - Code ErrorCode `json:"code"` - Detail interface{} `json:"detail,omitempty"` + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user } // ErrorCode returns the ID/Value of this Error @@ -97,9 +105,24 @@ func (e Error) Error() string { e.Code.Message()) } -// Message returned the human-readable error message for this Error -func (e Error) Message() string { - return e.Code.Message() +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } } // ErrorDescriptor provides relevant information about a given error code. @@ -160,20 +183,11 @@ func (errs Errors) Len() int { return len(errs) } -// jsonError extends Error with 'Message' so that we can include the -// error text, just in case the receiver of the JSON doesn't have this -// particular ErrorCode registered -type jsonError struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` -} - // MarshalJSON converts slice of error, ErrorCode or Error into a // slice of Error - then serializes func (errs Errors) MarshalJSON() ([]byte, error) { var tmpErrs struct { - Errors []jsonError `json:"errors,omitempty"` + Errors []Error `json:"errors,omitempty"` } for _, daErr := range errs { @@ -189,9 +203,16 @@ func (errs Errors) MarshalJSON() ([]byte, error) { } - tmpErrs.Errors = append(tmpErrs.Errors, jsonError{ + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ Code: err.Code, - Message: err.Message(), + Message: msg, Detail: err.Detail, }) } @@ -203,7 +224,7 @@ func (errs Errors) MarshalJSON() ([]byte, error) { // Error or ErrorCode func (errs *Errors) UnmarshalJSON(data []byte) error { var tmpErrs struct { - Errors []jsonError + Errors []Error } if err := json.Unmarshal(data, &tmpErrs); err != nil { @@ -212,14 +233,17 @@ func (errs *Errors) UnmarshalJSON(data []byte) error { var newErrs Errors for _, daErr := range tmpErrs.Errors { - if daErr.Detail == nil { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { // Error's w/o details get converted to ErrorCode newErrs = append(newErrs, daErr.Code) } else { // Error's w/ details are untouched newErrs = append(newErrs, Error{ - Code: daErr.Code, - Detail: daErr.Detail, + 
Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, }) } } diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go index 684e263a..1f0aaf91 100644 --- a/docs/api/errcode/errors_test.go +++ b/docs/api/errcode/errors_test.go @@ -76,12 +76,21 @@ var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ HTTPStatusCode: http.StatusNotFound, }) +var ErrorCodeTest3 = Register("v2.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + func TestErrorsManagement(t *testing.T) { var errs Errors errs = append(errs, ErrorCodeTest1) errs = append(errs, ErrorCodeTest2.WithDetail( map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) + errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) p, err := json.Marshal(errs) @@ -89,7 +98,12 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("error marashaling errors: %v", err) } - expectedJSON := "{\"errors\":[{\"code\":\"TEST1\",\"message\":\"test error 1\"},{\"code\":\"TEST2\",\"message\":\"test error 2\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}" + expectedJSON := `{"errors":[` + + `{"code":"TEST1","message":"test error 1"},` + + `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + + `]}` if string(p) != expectedJSON { t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) @@ -105,6 +119,13 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) } + // Test the arg substitution stuff + e1 := unmarshaled[3].(Error) + exp1 := `Sorry "BOOGIE" isn't valid` + if e1.Message != exp1 { + t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) + } + // Test again with a single value this time errs = Errors{ErrorCodeUnknown} expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" @@ -128,4 +149,26 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) } + // Verify that calling WithArgs() more than once does the right thing. + // Meaning creates a new Error and uses the ErrorCode Message + e1 = ErrorCodeTest3.WithArgs("test1") + e2 := e1.WithArgs("test2") + if &e1 == &e2 { + t.Fatalf("args: e2 and e1 should not be the same, but they are") + } + if e2.Message != `Sorry "test2" isn't valid` { + t.Fatalf("e2 had wrong message: %q", e2.Message) + } + + // Verify that calling WithDetail() more than once does the right thing. 
+ // Meaning creates a new Error and overwrites the old detail field + e1 = ErrorCodeTest3.WithDetail("stuff1") + e2 = e1.WithDetail("stuff2") + if &e1 == &e2 { + t.Fatalf("detail: e2 and e1 should not be the same, but they are") + } + if e2.Detail != `stuff2` { + t.Fatalf("e2 had wrong detail: %q", e2.Detail) + } + } diff --git a/docs/client/blob_writer_test.go b/docs/client/blob_writer_test.go index e3c880e1..099dca4f 100644 --- a/docs/client/blob_writer_test.go +++ b/docs/client/blob_writer_test.go @@ -90,7 +90,7 @@ func TestUploadReadFrom(t *testing.T) { [ { "code": "BLOB_UPLOAD_INVALID", - "message": "invalid upload identifier", + "message": "blob upload invalid", "detail": "more detail" } ] @@ -174,11 +174,11 @@ func TestUploadReadFrom(t *testing.T) { if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) } - if expected := "blob upload invalid"; v2Err.Message() != expected { - t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Message(), expected) + if expected := "blob upload invalid"; v2Err.Message != expected { + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) } if expected := "more detail"; v2Err.Detail.(string) != expected { - t.Fatalf("Unexpected error message: %s, expected %s", v2Err.Detail.(string), expected) + t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) } } diff --git a/docs/client/errors.go b/docs/client/errors.go index e743533b..327fea6d 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -52,10 +52,14 @@ func handleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return &errcode.Error{ - Code: v2.ErrorCodeUnauthorized, - Detail: uErr.Response, - } + return v2.ErrorCodeUnauthorized.WithDetail(uErr.Response) + /* + return &errcode.Error{ + Code: v2.ErrorCodeUnauthorized, + Message: v2.ErrorCodeUnauthorized.Message(), + Detail: uErr.Response, + } + */ } return err } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 7dbe97cf..ca31e40c 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -669,14 +669,14 @@ func TestManifestUnauthorized(t *testing.T) { if err == nil { t.Fatal("Expected error fetching manifest") } - v2Err, ok := err.(*errcode.Error) + v2Err, ok := err.(errcode.Error) if !ok { t.Fatalf("Unexpected error type: %#v", err) } if v2Err.Code != v2.ErrorCodeUnauthorized { t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) } - if expected := errcode.ErrorCode(v2.ErrorCodeUnauthorized).Message(); v2Err.Message() != expected { - t.Fatalf("Unexpected message value: %s, expected %s", v2Err.Message(), expected) + if expected := v2.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) } } From cd31d466e44186885e836b967cf346b2112feb7d Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 15 Jun 2015 10:39:34 -0700 Subject: [PATCH 180/501] Allow Manifest Service to be configured with function arguments Signed-off-by: Richard Scothern --- docs/client/repository.go | 10 +++++--- docs/client/repository_test.go | 36 ++++++++++++++++++++++----- docs/handlers/images.go | 19 +++++++++------ docs/handlers/tags.go | 6 ++++- docs/storage/manifeststore.go | 39 +++++++++++++++++++----------- docs/storage/manifeststore_test.go | 
12 ++++++--- docs/storage/registry.go | 22 ++++++++++++----- 7 files changed, 103 insertions(+), 41 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 1f360ec8..840a7af6 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -70,18 +70,20 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { } } -func (r *repository) Manifests() distribution.ManifestService { +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire return &manifests{ name: r.Name(), ub: r.ub, client: r.client, etags: make(map[string]string), - } + }, nil } func (r *repository) Signatures() distribution.SignatureService { + ms, _ := r.Manifests(r.context) return &signatures{ - manifests: r.Manifests(), + manifests: ms, } } @@ -236,6 +238,8 @@ func (ms *manifests) Put(m *manifest.SignedManifest) error { return err } + // todo(richardscothern): do something with options here when they become applicable + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) if err != nil { return err diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index bf02ce27..642ef998 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -492,6 +492,7 @@ func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { } func TestManifestFetch(t *testing.T) { + ctx := context.Background() repo := "test.example.com/repo" m1, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap @@ -504,7 +505,10 @@ func TestManifestFetch(t *testing.T) { if err != nil { t.Fatal(err) } - ms := r.Manifests() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } ok, err := ms.Exists(dgst) if err != nil { @@ -536,8 +540,12 @@ func TestManifestFetchWithEtag(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } - ms := r.Manifests() m2, err := ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) if err != nil { t.Fatal(err) @@ -572,8 +580,12 @@ func TestManifestDelete(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } - ms := r.Manifests() if err := ms.Delete(dgst1); err != nil { t.Fatal(err) } @@ -609,8 +621,12 @@ func TestManifestPut(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } - ms := r.Manifests() if err := ms.Put(m1); err != nil { t.Fatal(err) } @@ -653,8 +669,12 @@ func TestManifestTags(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } - ms := r.Manifests() tags, err := ms.Tags() if err != nil { t.Fatal(err) @@ -691,7 +711,11 @@ func TestManifestUnauthorized(t *testing.T) { if err != nil { t.Fatal(err) } - ms := r.Manifests() + ctx := context.Background() + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } _, err = ms.Get(dgst) if err == nil { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 747b2780..e5b0bc77 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -50,13 +50,13 @@ type imageManifestHandler struct { // GetImageManifest fetches the image manifest from the storage backend, if it exists. 
func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") - manifests := imh.Repository.Manifests() - - var ( - sm *manifest.SignedManifest - err error - ) + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + var sm *manifest.SignedManifest if imh.Tag != "" { sm, err = manifests.GetByTag(imh.Tag) } else { @@ -106,7 +106,12 @@ func etagMatch(r *http.Request, etag string) bool { // PutImageManifest validates and stores and image in the registry. func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("PutImageManifest") - manifests := imh.Repository.Manifests() + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + dec := json.NewDecoder(r.Body) var manifest manifest.SignedManifest diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 00f9760e..54725585 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -34,7 +34,11 @@ type tagsAPIResponse struct { // GetTags returns a json list of tags for a specific image name. func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - manifests := th.Repository.Manifests() + manifests, err := th.Repository.Manifests(th) + if err != nil { + th.Errors = append(th.Errors, err) + return + } tags, err := manifests.Tags() if err != nil { diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 8f6c3562..27d6a9fa 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -11,10 +11,11 @@ import ( ) type manifestStore struct { - repository *repository - revisionStore *revisionStore - tagStore *tagStore - ctx context.Context + repository *repository + revisionStore *revisionStore + tagStore *tagStore + ctx context.Context + skipDependencyVerification bool } var _ distribution.ManifestService = &manifestStore{} @@ -39,10 +40,19 @@ func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, erro return ms.revisionStore.get(ms.ctx, dgst) } +// SkipLayerVerification allows a manifest to be Put before it's +// layers are on the filesystem +func SkipLayerVerification(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifeststore") +} + func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - // Verify the manifest. if err := ms.verifyManifest(ms.ctx, manifest); err != nil { return err } @@ -113,18 +123,19 @@ func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *manifest.Sig } } - for _, fsLayer := range mnfst.FSLayers { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } + if !ms.skipDependencyVerification { + for _, fsLayer := range mnfst.FSLayers { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) + } } } - if len(errs) != 0 { return errs } diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 3422985a..55ea80ac 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -48,7 +48,11 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE func TestManifestStorage(t *testing.T) { env := newManifestStoreTestEnv(t, "foo/bar", "thetag") - ms := env.repository.Manifests() + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } exists, err := ms.ExistsByTag(env.tag) if err != nil { @@ -97,14 +101,14 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error generating private key: %v", err) } - sm, err := manifest.Sign(&m, pk) - if err != nil { + sm, merr := manifest.Sign(&m, pk) + if merr != nil { t.Fatalf("error signing manifest: %v", err) } err = ms.Put(sm) if err == nil { - t.Fatalf("expected errors putting manifest") + t.Fatalf("expected errors putting manifest with full verification") } switch err := err.(type) { diff --git a/docs/storage/registry.go b/docs/storage/registry.go index ff33f410..cf0fe3e7 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -99,15 +99,15 @@ func (repo *repository) Name() string { // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. -func (repo *repository) Manifests() distribution.ManifestService { - return &manifestStore{ - ctx: repo.ctx, +func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + ms := &manifestStore{ + ctx: ctx, repository: repo, revisionStore: &revisionStore{ - ctx: repo.ctx, + ctx: ctx, repository: repo, blobStore: &linkedBlobStore{ - ctx: repo.ctx, + ctx: ctx, blobStore: repo.blobStore, repository: repo, statter: &linkedBlobStatter{ @@ -122,11 +122,21 @@ func (repo *repository) Manifests() distribution.ManifestService { }, }, tagStore: &tagStore{ - ctx: repo.ctx, + ctx: ctx, repository: repo, blobStore: repo.registry.blobStore, }, } + + // Apply options + for _, option := range options { + err := option(ms) + if err != nil { + return nil, err + } + } + + return ms, nil } // Blobs returns an instance of the BlobStore. Instantiation is cheap and From 81c21411e89a491ada9d8759824040ee032bf3d3 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 16 Jul 2015 11:35:02 -0700 Subject: [PATCH 181/501] Export ServeJSON for serving error codes This changeset provides a common http handler for serving errcodes. This should unify http responses across webservices in the face of errors. Several type assertions have been added, as well, to ensure the error interface is implemented. 
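As a usage sketch (mirroring the call sites updated in app.go below), a
handler that has accumulated errors can now finish the request with:

    if err := errcode.ServeJSON(w, context.Errors); err != nil {
        ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
    }

ServeJSON takes the HTTP status code from the first ErrorCoder it finds
(defaulting to 500 when none is set), wraps bare errors in an Errors
envelope, and encodes the result as JSON.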
Signed-off-by: Stephen J Day --- docs/api/errcode/errors.go | 6 +++++ docs/api/errcode/handler.go | 44 +++++++++++++++++++++++++++++++++++++ docs/handlers/app.go | 26 ++++++++++++---------- docs/handlers/helpers.go | 33 ---------------------------- 4 files changed, 65 insertions(+), 44 deletions(-) create mode 100644 docs/api/errcode/handler.go diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index d221cb67..acdeb022 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -16,6 +16,8 @@ type ErrorCoder interface { // and the integer format may change and should *never* be exported. type ErrorCode int +var _ error = ErrorCode(0) + // ErrorCode just returns itself func (ec ErrorCode) ErrorCode() ErrorCode { return ec @@ -93,6 +95,8 @@ type Error struct { // variable substitution right before showing the message to the user } +var _ error = Error{} + // ErrorCode returns the ID/Value of this Error func (e Error) ErrorCode() ErrorCode { return e.Code @@ -163,6 +167,8 @@ func ParseErrorCode(value string) ErrorCode { // for use within the application. type Errors []error +var _ error = Errors{} + func (errs Errors) Error() string { switch len(errs) { case 0: diff --git a/docs/api/errcode/handler.go b/docs/api/errcode/handler.go new file mode 100644 index 00000000..49a64a86 --- /dev/null +++ b/docs/api/errcode/handler.go @@ -0,0 +1,44 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. 
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/docs/handlers/app.go b/docs/handlers/app.go index d3985067..c8c52362 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -379,7 +379,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) } - serveJSON(w, context.Errors) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } return } @@ -393,7 +395,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - serveJSON(w, context.Errors) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } return } } @@ -405,7 +409,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { if context.Errors.Len() > 0 { app.logError(context, context.Errors) - serveJSON(w, context.Errors) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } } }) } @@ -482,11 +488,9 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // base route is accessed. This section prevents us from making // that mistake elsewhere in the code, allowing any operation to // proceed. - - var errs errcode.Errors - errs = append(errs, v2.ErrorCodeUnauthorized) - - serveJSON(w, errs) + if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } return fmt.Errorf("forbidden: no repository name") } } @@ -498,9 +502,9 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // Add the appropriate WWW-Auth header err.ServeHTTP(w, r) - var errs errcode.Errors - errs = append(errs, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)) - serveJSON(w, errs) + if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } default: // This condition is a potential security problem either in // the configuration or whatever is backing the access diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index c72c5784..e2d220c2 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -1,43 +1,10 @@ package handlers import ( - "encoding/json" "io" "net/http" - - "github.com/docker/distribution/registry/api/errcode" ) -// serveJSON marshals v and sets the content-type header to -// 'application/json'. If a different status code is required, call -// ResponseWriter.WriteHeader before this function. 
-func serveJSON(w http.ResponseWriter, v interface{}) error {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	sc := http.StatusInternalServerError
-
-	if errs, ok := v.(errcode.Errors); ok && len(errs) > 0 {
-		if err, ok := errs[0].(errcode.ErrorCoder); ok {
-			if sc2 := err.ErrorCode().Descriptor().HTTPStatusCode; sc2 != 0 {
-				sc = sc2
-			}
-		}
-	} else if err, ok := v.(errcode.ErrorCoder); ok {
-		if sc2 := err.ErrorCode().Descriptor().HTTPStatusCode; sc2 != 0 {
-			sc = sc2
-		}
-	}
-
-	w.WriteHeader(sc)
-
-	enc := json.NewEncoder(w)
-
-	if err := enc.Encode(v); err != nil {
-		return err
-	}
-
-	return nil
-}
-
 // closeResources closes all the provided resources after running the target
 // handler.
 func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {

From 41aadeac9a5cb72682d6a6d4e36e57592b36909c Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Thu, 16 Jul 2015 12:40:31 -0700
Subject: [PATCH 182/501] Reduces log level of auth error lines from error->warn

An error-level log is already produced within app.authorized() if an
actual unexpected error occurs during authorization, so this warning-level
log remains for auditability purposes, but should not be considered an
error condition.

Addresses #704

Signed-off-by: Brian Bland
---
 docs/handlers/app.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index d3985067..8f9e918d 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -359,7 +359,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 		context := app.context(w, r)
 
 		if err := app.authorized(w, r, context); err != nil {
-			ctxu.GetLogger(context).Errorf("error authorizing context: %v", err)
+			ctxu.GetLogger(context).Warnf("error authorizing context: %v", err)
 			return
 		}
 
From feebd69d26df453d60427018af086198d800c71a Mon Sep 17 00:00:00 2001
From: yuzou
Date: Fri, 17 Jul 2015 14:55:31 +0800
Subject: [PATCH 183/501] Close reader after the test is finished.

Signed-off-by: yuzou
---
 docs/storage/driver/testsuites/testsuites.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
index 96231480..770c428c 100644
--- a/docs/storage/driver/testsuites/testsuites.go
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -258,6 +258,7 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) {
 
 	reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0)
 	c.Assert(err, check.IsNil)
+	defer reader.Close()
 
 	writtenChecksum := sha1.New()
 	io.Copy(writtenChecksum, reader)

From 249ad3b76d33fe6584b5de4811d2bbc99bc3fe68 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 17 Jul 2015 17:07:11 -0700
Subject: [PATCH 184/501] Use "Size" field to describe blobs over "Length"

After consideration, we've changed the main descriptor field name for the
number of bytes to "size" to match convention. While this may be a
subjective argument, commonly we refer to files by their "size" rather
than their "length". This will match other conventions, like
`(FileInfo).Size()` and methods on `io.SizeReaderAt`. Under broader
analysis, this argument doesn't necessarily hold up, but, if anything,
"size" is shorter than "length".
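The rename is mechanical at call sites; a hedged before/after sketch
(the descriptor construction is taken from the client's Put path below):

    desc := distribution.Descriptor{
        MediaType: mediaType,
        Size:      int64(len(p)), // previously: Length: int64(len(p))
        Digest:    dgstr.Digest(),
    }

The redis descriptor cache now reads and writes a "size" hash field and
treats a missing field as a cache miss, which transparently migrates
entries written under the old "length" key.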
Signed-off-by: Stephen J Day --- docs/client/repository.go | 6 +++--- docs/client/repository_test.go | 16 ++++++++-------- docs/storage/blob_test.go | 10 +++++----- docs/storage/blobserver.go | 4 ++-- docs/storage/blobstore.go | 6 +++--- docs/storage/blobwriter.go | 10 +++++----- docs/storage/cache/cache.go | 4 ++-- docs/storage/cache/redis/redis.go | 11 +++++++---- docs/storage/cache/suite.go | 6 +++--- 9 files changed, 38 insertions(+), 35 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 840a7af6..fc90cb6e 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -343,7 +343,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea return nil, err } - return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Length), nil + return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil } func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { @@ -366,7 +366,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut desc := distribution.Descriptor{ MediaType: mediaType, - Length: int64(len(p)), + Size: int64(len(p)), Digest: dgstr.Digest(), } @@ -435,7 +435,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi return distribution.Descriptor{ MediaType: resp.Header.Get("Content-Type"), - Length: length, + Size: length, Digest: dgst, }, nil case http.StatusNotFound: diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 642ef998..3a91be98 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -127,8 +127,8 @@ func TestBlobExists(t *testing.T) { t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) } - if stat.Length != int64(len(b1)) { - t.Fatalf("Unexpected length: %d, expected %d", stat.Length, len(b1)) + if stat.Size != int64(len(b1)) { + t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) } // TODO(dmcgowan): Test error cases and ErrBlobUnknown case @@ -244,14 +244,14 @@ func TestBlobUploadChunked(t *testing.T) { blob, err := upload.Commit(ctx, distribution.Descriptor{ Digest: dgst, - Length: int64(len(b1)), + Size: int64(len(b1)), }) if err != nil { t.Fatal(err) } - if blob.Length != int64(len(b1)) { - t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) } } @@ -352,14 +352,14 @@ func TestBlobUploadMonolithic(t *testing.T) { blob, err := upload.Commit(ctx, distribution.Descriptor{ Digest: dgst, - Length: int64(len(b1)), + Size: int64(len(b1)), }) if err != nil { t.Fatal(err) } - if blob.Length != int64(len(b1)) { - t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) + if blob.Size != int64(len(b1)) { + t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) } } diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 569f756d..0dbfe810 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -178,7 +178,7 @@ func TestSimpleBlobRead(t *testing.T) { t.Fatalf("error getting seeker size for random layer: %v", err) } - descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Length: randomLayerSize} + descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} t.Logf("desc: %v", descBefore) desc, err = addBlob(ctx, bs, 
descBefore, randomLayerReader) @@ -186,8 +186,8 @@ func TestSimpleBlobRead(t *testing.T) { t.Fatalf("error adding blob to blobservice: %v", err) } - if desc.Length != randomLayerSize { - t.Fatalf("committed blob has incorrect length: %v != %v", desc.Length, randomLayerSize) + if desc.Size != randomLayerSize { + t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) } rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. @@ -330,8 +330,8 @@ func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distributio if nn, err := io.Copy(wr, rd); err != nil { return distribution.Descriptor{}, err - } else if nn != desc.Length { - return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Length) + } else if nn != desc.Size { + return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size) } return wr.Commit(ctx, desc) diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go index a7b42681..d0b3204c 100644 --- a/docs/storage/blobserver.go +++ b/docs/storage/blobserver.go @@ -41,7 +41,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) case driver.ErrUnsupportedMethod: // Fallback to serving the content directly. - br, err := newFileReader(ctx, bs.driver, path, desc.Length) + br, err := newFileReader(ctx, bs.driver, path, desc.Size) if err != nil { return err } @@ -61,7 +61,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h if w.Header().Get("Content-Length") == "" { // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Length)) + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) } http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index afe42847..484e2106 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -50,7 +50,7 @@ func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution return nil, err } - return newFileReader(ctx, bs.driver, path, desc.Length) + return newFileReader(ctx, bs.driver, path, desc.Size) } // Put stores the content p in the blob store, calculating the digest. If the @@ -81,7 +81,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr // TODO(stevvooe): Write out mediatype here, as well. return distribution.Descriptor{ - Length: int64(len(p)), + Size: int64(len(p)), // NOTE(stevvooe): The central blob store firewalls media types from // other users. The caller should look this up and override the value @@ -179,7 +179,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi // mediatype that overrides the main one. return distribution.Descriptor{ - Length: fi.Size(), + Size: fi.Size(), // NOTE(stevvooe): The central blob store firewalls media types from // other users. The caller should look this up and override the value diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 4189d517..b39c851e 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -148,7 +148,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // NOTE(stevvooe): We really don't care if the file is // not actually present for the reader. We now assume // that the desc length is zero. 
- desc.Length = 0 + desc.Size = 0 default: // Any other error we want propagated up the stack. return distribution.Descriptor{}, err @@ -161,14 +161,14 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri bw.size = fi.Size() } - if desc.Length > 0 { - if desc.Length != bw.size { + if desc.Size > 0 { + if desc.Size != bw.size { return distribution.Descriptor{}, distribution.ErrBlobInvalidLength } } else { // if provided 0 or negative length, we can assume caller doesn't know or // care about length. - desc.Length = bw.size + desc.Size = bw.size } // TODO(stevvooe): This section is very meandering. Need to be broken down @@ -216,7 +216,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } // Read the file from the backend driver and validate it. - fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Length) + fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size) if err != nil { return distribution.Descriptor{}, err } diff --git a/docs/storage/cache/cache.go b/docs/storage/cache/cache.go index 79e6d9c8..10a39091 100644 --- a/docs/storage/cache/cache.go +++ b/docs/storage/cache/cache.go @@ -23,8 +23,8 @@ func ValidateDescriptor(desc distribution.Descriptor) error { return err } - if desc.Length < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Length) + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) } if desc.MediaType == "" { diff --git a/docs/storage/cache/redis/redis.go b/docs/storage/cache/redis/redis.go index 29bbe3bc..64010a09 100644 --- a/docs/storage/cache/redis/redis.go +++ b/docs/storage/cache/redis/redis.go @@ -66,17 +66,20 @@ func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Di // stat provides an internal stat call that takes a connection parameter. This // allows some internal management of the connection scope. func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { - reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype")) + reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) if err != nil { return distribution.Descriptor{}, err } - if len(reply) < 2 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil + // NOTE(stevvooe): The "size" field used to be "length". We treat a + // missing "size" field here as an unknown blob, which causes a cache + // miss, effectively migrating the field. 
+ if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil return distribution.Descriptor{}, distribution.ErrBlobUnknown } var desc distribution.Descriptor - if _, err := redis.Scan(reply, &desc.Digest, &desc.Length, &desc.MediaType); err != nil { + if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { return distribution.Descriptor{}, err } @@ -104,7 +107,7 @@ func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), "digest", desc.Digest, - "length", desc.Length); err != nil { + "size", desc.Size); err != nil { return err } diff --git a/docs/storage/cache/suite.go b/docs/storage/cache/suite.go index ceefab97..f74d9f9e 100644 --- a/docs/storage/cache/suite.go +++ b/docs/storage/cache/suite.go @@ -35,14 +35,14 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ Digest: "sha384:abc", - Length: 10, + Size: 10, MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { t.Fatalf("expected error with invalid digest: %v", err) } if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{ Digest: "", - Length: 10, + Size: 10, MediaType: "application/octet-stream"}); err == nil { t.Fatalf("expected error setting value on invalid descriptor") } @@ -60,7 +60,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi localDigest := digest.Digest("sha384:abc") expected := distribution.Descriptor{ Digest: "sha256:abc", - Length: 10, + Size: 10, MediaType: "application/octet-stream"} cache, err := provider.RepositoryScoped("foo/bar") From cce4956131f8083ffaa5f032fd8ea8747a269117 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Mon, 11 May 2015 18:11:47 +0200 Subject: [PATCH 185/501] Add Openstack Swift storage driver Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 519 ++++++++++++++++++++++++ docs/storage/driver/swift/swift_test.go | 141 +++++++ 2 files changed, 660 insertions(+) create mode 100644 docs/storage/driver/swift/swift.go create mode 100644 docs/storage/driver/swift/swift_test.go diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go new file mode 100644 index 00000000..4a812e9e --- /dev/null +++ b/docs/storage/driver/swift/swift.go @@ -0,0 +1,519 @@ +// Package swift provides a storagedriver.StorageDriver implementation to +// store blobs in Openstack Swift object storage. +// +// This package leverages the ncw/swift client library for interfacing with +// Swift. 
+// +// Because Swift is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +package swift + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + gopath "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/lebauce/swift" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "swift" + +const defaultChunkSize = 5 * 1024 * 1024 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + Username string + Password string + AuthURL string + Tenant string + Region string + Container string + Prefix string + ChunkSize int64 +} + +type swiftInfo map[string]interface{} + +func init() { + factory.Register(driverName, &swiftDriverFactory{}) +} + +// swiftDriverFactory implements the factory.StorageDriverFactory interface +type swiftDriverFactory struct{} + +func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Conn swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + ChunkSize int64 +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon Swift +// Objects are stored at absolute keys in the provided bucket. +type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - username +// - password +// - authurl +// - container +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + username, ok := parameters["username"] + if !ok || fmt.Sprint(username) == "" { + return nil, fmt.Errorf("No username parameter provided") + } + password, ok := parameters["password"] + if !ok || fmt.Sprint(password) == "" { + return nil, fmt.Errorf("No password parameter provided") + } + authURL, ok := parameters["authurl"] + if !ok || fmt.Sprint(authURL) == "" { + return nil, fmt.Errorf("No container parameter provided") + } + container, ok := parameters["container"] + if !ok || fmt.Sprint(container) == "" { + return nil, fmt.Errorf("No container parameter provided") + } + tenant, ok := parameters["tenant"] + if !ok { + tenant = "" + } + region, ok := parameters["region"] + if !ok { + region = "" + } + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + chunkSize, ok = chunkSizeParam.(int64) + if !ok { + return nil, fmt.Errorf("The chunksize parameter should be a number") + } + } + + params := DriverParameters{ + fmt.Sprint(username), + fmt.Sprint(password), + fmt.Sprint(authURL), + fmt.Sprint(tenant), + fmt.Sprint(region), + fmt.Sprint(container), + fmt.Sprint(rootDirectory), + chunkSize, + } + + return New(params) +} + +// New constructs a new Driver with the given Openstack Swift credentials and container name +func New(params DriverParameters) (*Driver, error) { + ct := swift.Connection{ + UserName: params.Username, + ApiKey: params.Password, + AuthUrl: params.AuthURL, + Region: params.Region, + UserAgent: "distribution", + Tenant: 
params.Tenant, + ConnectTimeout: 60 * time.Second, + Timeout: 15 * 60 * time.Second, + } + err := ct.Authenticate() + if err != nil { + return nil, fmt.Errorf("Swift authentication failed: %s", err) + } + + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + } + + if err := ct.ContainerCreate(params.Container + "_segments", nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container + "_segments", err) + } + + d := &driver{ + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + BulkDeleteSupport: detectBulkDelete(params.AuthURL), + ChunkSize: params.ChunkSize, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + if dir, err := d.createParentFolder(path); err != nil { + return parseError(dir, err) + } + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), + contents, d.getContentType()) + return parseError(path, err) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" + + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + + if err != nil { + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 416 { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + + return file, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. 
+func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { + var ( + segments []swift.Object + paddingReader io.Reader + ) + + partNumber := int64(1) + bytesRead := int64(0) + currentLength := int64(0) + zeroBuf := make([]byte, d.ChunkSize) + segmentsContainer := d.Container + "_segments" + cursor := int64(0) + + getSegment := func() string { + return d.swiftPath(path) + "/" + fmt.Sprintf("%016d", partNumber) + } + + max := func(a int64, b int64) int64 { + if a > b { + return a + } + return b + } + + info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err != nil { + if swiftErr, ok := err.(*swift.Error); ok { + if swiftErr.StatusCode == 404 { + // Create a object manifest + if dir, err := d.createParentFolder(path); err != nil { + return bytesRead, parseError(dir, err) + } + headers := make(swift.Headers) + headers["X-Object-Manifest"] = segmentsContainer + "/" + d.swiftPath(path) + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", + d.getContentType(), headers) + manifest.Close() + if err != nil { + return bytesRead, parseError(path, err) + } + } else { + return bytesRead, parseError(path, err) + } + } else { + return bytesRead, parseError(path, err) + } + } else { + // The manifest already exists. Get all the segments + currentLength = info.Bytes + headers := make(swift.Headers) + headers["Content-Type"] = "application/json" + opts := &swift.ObjectsOpts{Prefix: d.swiftPath(path), Headers: headers} + segments, err = d.Conn.Objects(d.Container + "_segments", opts) + if err != nil { + return bytesRead, parseError(path, err) + } + } + + // First, we skip the existing segments that are not modified by this call + for i := range segments { + if offset < cursor + segments[i].Bytes { + break + } + cursor += segments[i].Bytes + partNumber++ + } + + // We reached the end of the file but we haven't reached 'offset' yet + // Therefore we add blocks of zeros + if offset >= currentLength { + for offset - currentLength >= d.ChunkSize { + // Insert a block a zero + d.Conn.ObjectPut(segmentsContainer, getSegment(), + bytes.NewReader(zeroBuf), false, "", + d.getContentType(), nil) + currentLength += d.ChunkSize + partNumber++ + } + + cursor = currentLength + paddingReader = bytes.NewReader(zeroBuf) + } else { + // Offset is inside the current segment : we need to read the + // data from the beginning of the segment to offset + paddingReader, _, err = d.Conn.ObjectOpen(segmentsContainer, getSegment(), false, nil) + if err != nil { + return bytesRead, parseError(getSegment(), err) + } + } + + multi := io.MultiReader( + io.LimitReader(paddingReader, offset - cursor), + io.LimitReader(reader, d.ChunkSize - (offset - cursor)), + ) + + for { + currentSegment, err := d.Conn.ObjectCreate(segmentsContainer, getSegment(), false, "", d.getContentType(), nil) + if err != nil { + return bytesRead, parseError(path, err) + } + + n, err := io.Copy(currentSegment, multi) + if err != nil { + return bytesRead, parseError(path, err) + } + + if n < d.ChunkSize { + // We wrote all the data + if cursor + n < currentLength { + // Copy the end of the chunk + headers := make(swift.Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(cursor + n, 10) + "-" + strconv.FormatInt(cursor + d.ChunkSize, 10) + file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) + if err != nil { + return bytesRead, parseError(path, err) + } + io.Copy(currentSegment, file) + file.Close() + } + if n > 0 { + 
currentSegment.Close() + bytesRead += n - max(0, offset - cursor) + } + break + } + + currentSegment.Close() + bytesRead += n - max(0, offset - cursor) + multi = io.MultiReader(io.LimitReader(reader, d.ChunkSize)) + cursor += d.ChunkSize + partNumber++ + } + + return bytesRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err != nil { + return nil, parseError(path, err) + } + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: info.ContentType == "application/directory", + Size: info.Bytes, + ModTime: info.LastModified, + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + prefix := d.swiftPath(path) + if prefix != "" { + prefix += "/" + } + + opts := &swift.ObjectsOpts{ + Path: prefix, + Delimiter: '/', + } + + files, err := d.Conn.ObjectNames(d.Container, opts) + for index, name := range files { + files[index] = "/" + strings.TrimSuffix(name, "/") + } + + return files, parseError(path, err) +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + err := d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), + d.Container, d.swiftPath(destPath)) + if err != nil { + return parseError(sourcePath, err) + } + + return nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + opts := swift.ObjectsOpts{ + Prefix: d.swiftPath(path), + } + + objects, err := d.Conn.ObjectNamesAll(d.Container, &opts) + if err != nil { + return parseError(path, err) + } + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + for index, name := range objects { + objects[index] = name[len(d.Prefix):] + } + + var multiDelete = true + if d.BulkDeleteSupport { + _, err := d.Conn.BulkDelete(d.Container, objects) + multiDelete = err != nil + } + if multiDelete { + for _, name := range objects { + if _, headers, err := d.Conn.Object(d.Container, name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + components := strings.SplitN(manifest, "/", 2) + segContainer := components[0] + segments, err := d.Conn.ObjectNamesAll(segContainer, &swift.ObjectsOpts{ Prefix: components[1] }) + if err != nil { + return parseError(name, err) + } + + for _, s := range segments { + if err := d.Conn.ObjectDelete(segContainer, s); err != nil { + return parseError(s, err) + } + } + } + } else { + return parseError(name, err) + } + + if err := d.Conn.ObjectDelete(d.Container, name); err != nil { + return parseError(name, err) + } + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
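//
// This driver does not implement URLFor, so callers are expected to fall
// back to serving the content themselves. A sketch of that fallback (the
// driver value and handling are illustrative):
//
//	if _, err := sd.URLFor(ctx, path, nil); err == storagedriver.ErrUnsupportedMethod {
//		// stream the blob through the registry instead of redirecting
//	}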
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + return "", storagedriver.ErrUnsupportedMethod +} + +func (d *driver) swiftPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.Prefix, "/")+path, "/") +} + +func (d *driver) createParentFolder(path string) (string, error) { + dir := gopath.Dir(path) + if dir != "/" { + _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), + false, "", "application/directory", nil) + if err != nil { + return dir, err + } + } + } + + return dir, nil +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +func detectBulkDelete(authURL string) (bulkDelete bool) { + resp, err := http.Get(filepath.Join(authURL, "..", "..") + "/info") + if err == nil { + defer resp.Body.Close() + decoder := json.NewDecoder(resp.Body) + var infos swiftInfo + if decoder.Decode(&infos) == nil { + _, bulkDelete = infos["bulk_delete"] + } + } + return +} + +func parseError(path string, err error) error { + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go new file mode 100644 index 00000000..6038c319 --- /dev/null +++ b/docs/storage/driver/swift/swift_test.go @@ -0,0 +1,141 @@ +package swift + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/lebauce/swift/swifttest" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
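// The suites registered in init below are driven through this single entry
// point; a hypothetical invocation for just this driver's package would be:
//
//	go test ./docs/storage/driver/swift/ -check.v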
+func Test(t *testing.T) { check.TestingT(t) } + +type SwiftDriverConstructor func(rootDirectory string) (*Driver, error) + +func init() { + var ( + username string + password string + authURL string + tenant string + container string + region string + prefix string + swiftServer *swifttest.SwiftServer + err error + ) + if username = os.Getenv("OS_USERNAME"); username == "" { + username = os.Getenv("ST_USER") + } + if password = os.Getenv("OS_PASSWORD"); password == "" { + password = os.Getenv("ST_KEY") + } + if authURL = os.Getenv("OS_AUTH_URL"); authURL == "" { + authURL = os.Getenv("ST_AUTH") + } + tenant = os.Getenv("OS_TENANT_NAME") + container = os.Getenv("OS_CONTAINER_NAME") + region = os.Getenv("OS_REGION_NAME") + prefix = os.Getenv("OS_CONTAINER_PREFIX") + + if username == "" || password == "" || authURL == "" || container == "" { + if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { + panic(err) + } + username = "swifttest" + password = "swifttest" + authURL = swiftServer.AuthURL + container = "test" + } + + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + swiftDriverConstructor := func(rootDirectory string) (*Driver, error) { + parameters := DriverParameters{ + username, + password, + authURL, + tenant, + region, + container, + prefix, + defaultChunkSize, + } + + return New(parameters) + } + + skipCheck := func() string { + return "" + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return swiftDriverConstructor(root) + } + + testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) + + RegisterSwiftDriverSuite(swiftDriverConstructor, skipCheck, swiftServer) +} + +func RegisterSwiftDriverSuite(swiftDriverConstructor SwiftDriverConstructor, skipCheck testsuites.SkipCheck, + swiftServer *swifttest.SwiftServer) { + check.Suite(&SwiftDriverSuite{ + Constructor: swiftDriverConstructor, + SkipCheck: skipCheck, + SwiftServer: swiftServer, + }) +} + +type SwiftDriverSuite struct { + Constructor SwiftDriverConstructor + SwiftServer *swifttest.SwiftServer + testsuites.SkipCheck +} + +func (suite *SwiftDriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } +} + +func (suite *SwiftDriverSuite) TestEmptyRootList(c *check.C) { + validRoot, err := ioutil.TempDir("", "driver-") + c.Assert(err, check.IsNil) + defer os.Remove(validRoot) + + rootedDriver, err := suite.Constructor(validRoot) + c.Assert(err, check.IsNil) + emptyRootDriver, err := suite.Constructor("") + c.Assert(err, check.IsNil) + slashRootDriver, err := suite.Constructor("/") + c.Assert(err, check.IsNil) + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + c.Assert(err, check.IsNil) + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } +} From 1f4eb7b73523d596b4314202d803f56475ac1bdf Mon Sep 17 00:00:00 2001 From: davidli Date: Fri, 22 May 2015 14:31:47 +0800 Subject: [PATCH 186/501] Use gofmt to format the code of swift driver. 
Signed-off-by: Li Wenquan --- docs/storage/driver/swift/swift.go | 36 ++++++++++++------------- docs/storage/driver/swift/swift_test.go | 16 +++++------ 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 4a812e9e..b4aaacf6 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -155,8 +155,8 @@ func New(params DriverParameters) (*Driver, error) { return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) } - if err := ct.ContainerCreate(params.Container + "_segments", nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container + "_segments", err) + if err := ct.ContainerCreate(params.Container+"_segments", nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container+"_segments", err) } d := &driver{ @@ -197,7 +197,7 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e return parseError(dir, err) } err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), - contents, d.getContentType()) + contents, d.getContentType()) return parseError(path, err) } @@ -262,7 +262,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea headers := make(swift.Headers) headers["X-Object-Manifest"] = segmentsContainer + "/" + d.swiftPath(path) manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", - d.getContentType(), headers) + d.getContentType(), headers) manifest.Close() if err != nil { return bytesRead, parseError(path, err) @@ -279,7 +279,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea headers := make(swift.Headers) headers["Content-Type"] = "application/json" opts := &swift.ObjectsOpts{Prefix: d.swiftPath(path), Headers: headers} - segments, err = d.Conn.Objects(d.Container + "_segments", opts) + segments, err = d.Conn.Objects(d.Container+"_segments", opts) if err != nil { return bytesRead, parseError(path, err) } @@ -287,7 +287,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // First, we skip the existing segments that are not modified by this call for i := range segments { - if offset < cursor + segments[i].Bytes { + if offset < cursor+segments[i].Bytes { break } cursor += segments[i].Bytes @@ -297,11 +297,11 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // We reached the end of the file but we haven't reached 'offset' yet // Therefore we add blocks of zeros if offset >= currentLength { - for offset - currentLength >= d.ChunkSize { + for offset-currentLength >= d.ChunkSize { // Insert a block a zero d.Conn.ObjectPut(segmentsContainer, getSegment(), - bytes.NewReader(zeroBuf), false, "", - d.getContentType(), nil) + bytes.NewReader(zeroBuf), false, "", + d.getContentType(), nil) currentLength += d.ChunkSize partNumber++ } @@ -318,8 +318,8 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } multi := io.MultiReader( - io.LimitReader(paddingReader, offset - cursor), - io.LimitReader(reader, d.ChunkSize - (offset - cursor)), + io.LimitReader(paddingReader, offset-cursor), + io.LimitReader(reader, d.ChunkSize-(offset-cursor)), ) for { @@ -335,10 +335,10 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if n < d.ChunkSize { // We wrote all the data - if cursor + n < currentLength { + if cursor+n < currentLength { // 
Copy the end of the chunk headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(cursor + n, 10) + "-" + strconv.FormatInt(cursor + d.ChunkSize, 10) + headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+d.ChunkSize, 10) file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err != nil { return bytesRead, parseError(path, err) @@ -348,13 +348,13 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } if n > 0 { currentSegment.Close() - bytesRead += n - max(0, offset - cursor) + bytesRead += n - max(0, offset-cursor) } break } currentSegment.Close() - bytesRead += n - max(0, offset - cursor) + bytesRead += n - max(0, offset-cursor) multi = io.MultiReader(io.LimitReader(reader, d.ChunkSize)) cursor += d.ChunkSize partNumber++ @@ -405,7 +405,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { err := d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), - d.Container, d.swiftPath(destPath)) + d.Container, d.swiftPath(destPath)) if err != nil { return parseError(sourcePath, err) } @@ -443,7 +443,7 @@ func (d *driver) Delete(ctx context.Context, path string) error { if ok { components := strings.SplitN(manifest, "/", 2) segContainer := components[0] - segments, err := d.Conn.ObjectNamesAll(segContainer, &swift.ObjectsOpts{ Prefix: components[1] }) + segments, err := d.Conn.ObjectNamesAll(segContainer, &swift.ObjectsOpts{Prefix: components[1]}) if err != nil { return parseError(name, err) } @@ -483,7 +483,7 @@ func (d *driver) createParentFolder(path string) (string, error) { _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), - false, "", "application/directory", nil) + false, "", "application/directory", nil) if err != nil { return dir, err } diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 6038c319..03515bb2 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -21,15 +21,15 @@ type SwiftDriverConstructor func(rootDirectory string) (*Driver, error) func init() { var ( - username string - password string - authURL string - tenant string - container string - region string - prefix string + username string + password string + authURL string + tenant string + container string + region string + prefix string swiftServer *swifttest.SwiftServer - err error + err error ) if username = os.Getenv("OS_USERNAME"); username == "" { username = os.Getenv("ST_USER") From 9f7f23e3738a3ce1474d8d651b7e6b76f9722219 Mon Sep 17 00:00:00 2001 From: nevermosby Date: Sat, 23 May 2015 15:22:41 +0800 Subject: [PATCH 187/501] Update the import path for swift driver test Signed-off-by: Li Wenquan --- docs/storage/driver/swift/swift.go | 2 +- docs/storage/driver/swift/swift_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index b4aaacf6..9287be41 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/lebauce/swift" + "github.com/ncw/swift" "github.com/docker/distribution/context" storagedriver 
"github.com/docker/distribution/registry/storage/driver" diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 03515bb2..5ead8d15 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/lebauce/swift/swifttest" + "github.com/ncw/swift/swifttest" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" From 16a49ade166bd3d80c164c1798edf9e8cecbee39 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 29 May 2015 15:46:12 +0200 Subject: [PATCH 188/501] Handle error during copy of original content Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 9287be41..2620de00 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -343,7 +343,9 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if err != nil { return bytesRead, parseError(path, err) } - io.Copy(currentSegment, file) + if _, err := io.Copy(currentSegment, file); err != nil { + return bytesRead, parseError(path, err) + } file.Close() } if n > 0 { From 8a22c0f4e10824ad58de51a1038e0effe77569e8 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 29 May 2015 15:50:22 +0200 Subject: [PATCH 189/501] Simplify code that handles non existing manifests Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 2620de00..44f61a1f 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -253,23 +253,16 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) if err != nil { - if swiftErr, ok := err.(*swift.Error); ok { - if swiftErr.StatusCode == 404 { - // Create a object manifest - if dir, err := d.createParentFolder(path); err != nil { - return bytesRead, parseError(dir, err) - } - headers := make(swift.Headers) - headers["X-Object-Manifest"] = segmentsContainer + "/" + d.swiftPath(path) - manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", - d.getContentType(), headers) - manifest.Close() - if err != nil { - return bytesRead, parseError(path, err) - } - } else { + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + // Create a object manifest + if dir, err := d.createParentFolder(path); err != nil { + return bytesRead, parseError(dir, err) + } + manifest, err := d.createManifest(path) + if err != nil { return bytesRead, parseError(path, err) } + manifest.Close() } else { return bytesRead, parseError(path, err) } From ea81e208a4263b73fc6d330256afacd0721af680 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 29 May 2015 16:09:05 +0200 Subject: [PATCH 190/501] Move Dynamic Large Object handling to dedicated methods Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 44f61a1f..c7c678cb 100644 --- a/docs/storage/driver/swift/swift.go +++ 
b/docs/storage/driver/swift/swift.go @@ -237,8 +237,8 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea bytesRead := int64(0) currentLength := int64(0) zeroBuf := make([]byte, d.ChunkSize) - segmentsContainer := d.Container + "_segments" cursor := int64(0) + segmentsContainer := d.getSegmentsContainer() getSegment := func() string { return d.swiftPath(path) + "/" + fmt.Sprintf("%016d", partNumber) @@ -269,10 +269,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } else { // The manifest already exists. Get all the segments currentLength = info.Bytes - headers := make(swift.Headers) - headers["Content-Type"] = "application/json" - opts := &swift.ObjectsOpts{Prefix: d.swiftPath(path), Headers: headers} - segments, err = d.Conn.Objects(d.Container+"_segments", opts) + segments, err = d.getAllSegments(segmentsContainer, path) if err != nil { return bytesRead, parseError(path, err) } @@ -438,14 +435,14 @@ func (d *driver) Delete(ctx context.Context, path string) error { if ok { components := strings.SplitN(manifest, "/", 2) segContainer := components[0] - segments, err := d.Conn.ObjectNamesAll(segContainer, &swift.ObjectsOpts{Prefix: components[1]}) + segments, err := d.getAllSegments(segContainer, components[1]) if err != nil { return parseError(name, err) } for _, s := range segments { - if err := d.Conn.ObjectDelete(segContainer, s); err != nil { - return parseError(s, err) + if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { + return parseError(s.Name, err) } } } @@ -492,6 +489,21 @@ func (d *driver) getContentType() string { return "application/octet-stream" } +func (d *driver) getSegmentsContainer() string { + return d.Container + "_segments" +} + +func (d *driver) getAllSegments(container string, path string) ([]swift.Object, error) { + return d.Conn.Objects(container, &swift.ObjectsOpts{Prefix: d.swiftPath(path)}) +} + +func (d *driver) createManifest(path string) (*swift.ObjectCreateFile, error) { + headers := make(swift.Headers) + headers["X-Object-Manifest"] = d.getSegmentsContainer() + "/" + d.swiftPath(path) + return d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", + d.getContentType(), headers) +} + func detectBulkDelete(authURL string) (bulkDelete bool) { resp, err := http.Get(filepath.Join(authURL, "..", "..") + "/info") if err == nil { From 75ce67c469a634ea92d8793deb85457242548284 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 29 May 2015 16:12:58 +0200 Subject: [PATCH 191/501] Use mitchellh/mapstructure library to parse Swift parameters Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 102 +++++++++++++---------------- 1 file changed, 44 insertions(+), 58 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index c7c678cb..a60f2029 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -21,6 +21,7 @@ import ( "strings" "time" + "github.com/mitchellh/mapstructure" "github.com/ncw/swift" "github.com/docker/distribution/context" @@ -33,6 +34,10 @@ const driverName = "swift" const defaultChunkSize = 5 * 1024 * 1024 +const minChunkSize = 1 << 20 + +const directoryMimeType = "application/directory" + //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { Username string @@ -42,7 +47,7 @@ type DriverParameters struct { Region string Container string Prefix string - ChunkSize int64 + ChunkSize 
int } type swiftInfo map[string]interface{} @@ -63,7 +68,7 @@ type driver struct { Container string Prefix string BulkDeleteSupport bool - ChunkSize int64 + ChunkSize int } type baseEmbed struct { @@ -83,52 +88,32 @@ type Driver struct { // - authurl // - container func FromParameters(parameters map[string]interface{}) (*Driver, error) { - username, ok := parameters["username"] - if !ok || fmt.Sprint(username) == "" { - return nil, fmt.Errorf("No username parameter provided") - } - password, ok := parameters["password"] - if !ok || fmt.Sprint(password) == "" { - return nil, fmt.Errorf("No password parameter provided") - } - authURL, ok := parameters["authurl"] - if !ok || fmt.Sprint(authURL) == "" { - return nil, fmt.Errorf("No container parameter provided") - } - container, ok := parameters["container"] - if !ok || fmt.Sprint(container) == "" { - return nil, fmt.Errorf("No container parameter provided") - } - tenant, ok := parameters["tenant"] - if !ok { - tenant = "" - } - region, ok := parameters["region"] - if !ok { - region = "" - } - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - chunkSize, ok = chunkSizeParam.(int64) - if !ok { - return nil, fmt.Errorf("The chunksize parameter should be a number") - } + params := DriverParameters{ + ChunkSize: defaultChunkSize, } - params := DriverParameters{ - fmt.Sprint(username), - fmt.Sprint(password), - fmt.Sprint(authURL), - fmt.Sprint(tenant), - fmt.Sprint(region), - fmt.Sprint(container), - fmt.Sprint(rootDirectory), - chunkSize, + if err := mapstructure.Decode(parameters, ¶ms); err != nil { + return nil, err + } + + if params.Username == "" { + return nil, fmt.Errorf("No username parameter provided") + } + + if params.Password == "" { + return nil, fmt.Errorf("No password parameter provided") + } + + if params.AuthURL == "" { + return nil, fmt.Errorf("No authurl parameter provided") + } + + if params.Container == "" { + return nil, fmt.Errorf("No container parameter provided") + } + + if params.ChunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) } return New(params) @@ -231,13 +216,14 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea var ( segments []swift.Object paddingReader io.Reader + bytesRead int64 + currentLength int64 + cursor int64 ) - partNumber := int64(1) - bytesRead := int64(0) - currentLength := int64(0) + partNumber := 1 + chunkSize := int64(d.ChunkSize) zeroBuf := make([]byte, d.ChunkSize) - cursor := int64(0) segmentsContainer := d.getSegmentsContainer() getSegment := func() string { @@ -287,12 +273,12 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // We reached the end of the file but we haven't reached 'offset' yet // Therefore we add blocks of zeros if offset >= currentLength { - for offset-currentLength >= d.ChunkSize { + for offset-currentLength >= chunkSize { // Insert a block a zero d.Conn.ObjectPut(segmentsContainer, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) - currentLength += d.ChunkSize + currentLength += chunkSize partNumber++ } @@ -309,7 +295,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea multi := io.MultiReader( io.LimitReader(paddingReader, offset-cursor), - io.LimitReader(reader, d.ChunkSize-(offset-cursor)), + 
io.LimitReader(reader, chunkSize-(offset-cursor)), ) for { @@ -323,12 +309,12 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea return bytesRead, parseError(path, err) } - if n < d.ChunkSize { + if n < chunkSize { // We wrote all the data if cursor+n < currentLength { // Copy the end of the chunk headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+d.ChunkSize, 10) + headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err != nil { return bytesRead, parseError(path, err) @@ -347,8 +333,8 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea currentSegment.Close() bytesRead += n - max(0, offset-cursor) - multi = io.MultiReader(io.LimitReader(reader, d.ChunkSize)) - cursor += d.ChunkSize + multi = io.MultiReader(io.LimitReader(reader, chunkSize)) + cursor += chunkSize partNumber++ } @@ -365,7 +351,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, fi := storagedriver.FileInfoFields{ Path: path, - IsDir: info.ContentType == "application/directory", + IsDir: info.ContentType == directoryMimeType, Size: info.Bytes, ModTime: info.LastModified, } From 4e619bc9b100a7afcd3018af5492e29dd964a8e5 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 29 May 2015 16:14:12 +0200 Subject: [PATCH 192/501] Remove one level of indentation in swift path handling code Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index a60f2029..213dfc29 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -457,14 +457,16 @@ func (d *driver) swiftPath(path string) string { func (d *driver) createParentFolder(path string) (string, error) { dir := gopath.Dir(path) - if dir != "/" { - _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { - _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), - false, "", "application/directory", nil) - if err != nil { - return dir, err - } + if dir == "/" { + return dir, nil + } + + _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), + false, "", directoryMimeType, nil) + if err != nil { + return dir, err } } From 1d46bb2bccf69ebf81585d821467fdded6fd36fb Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 4 Jun 2015 10:10:21 +0200 Subject: [PATCH 193/501] Create full folder hierarchy instead of just the top level folder Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 31 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 213dfc29..38e87239 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -178,8 +178,8 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". 
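// A sketch of the behaviour this patch introduces, assuming the
// createParentFolders helper shown later in the diff: for a path such as
//
//	/docker/registry/v2/blobs/data
//
// the helper now walks up through every ancestor directory, creating a
// directory marker object for each level that does not yet exist, instead
// of only the immediate parent.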
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if dir, err := d.createParentFolder(path); err != nil { - return parseError(dir, err) + if err := d.createParentFolders(path); err != nil { + return err } err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) @@ -241,8 +241,8 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if err != nil { if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { // Create a object manifest - if dir, err := d.createParentFolder(path); err != nil { - return bytesRead, parseError(dir, err) + if err := d.createParentFolders(path); err != nil { + return bytesRead, err } manifest, err := d.createManifest(path) if err != nil { @@ -455,22 +455,21 @@ func (d *driver) swiftPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.Prefix, "/")+path, "/") } -func (d *driver) createParentFolder(path string) (string, error) { +func (d *driver) createParentFolders(path string) error { dir := gopath.Dir(path) - if dir == "/" { - return dir, nil - } - - _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { - _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), - false, "", directoryMimeType, nil) - if err != nil { - return dir, err + for dir != "/" { + _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), + false, "", directoryMimeType, nil) + if err != nil { + return parseError(dir, err) + } } + dir = gopath.Dir(dir) } - return dir, nil + return nil } func (d *driver) getContentType() string { From 3f9e7ed169af1bc5879d669e1b68cc52220f0ecb Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 4 Jun 2015 10:11:19 +0200 Subject: [PATCH 194/501] Use 'prefix' parameter instead of 'path' when listing files Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 38e87239..66c1a85b 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -361,19 +361,23 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, // List returns a list of the objects that are direct descendants of the given path. 
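// A sketch of the listing semantics used below (object names are
// illustrative): with stored objects "a/1", "a/2/x" and "ab", querying
//
//	opts := &swift.ObjectsOpts{Prefix: "a/", Delimiter: '/'}
//
// yields "a/1" plus a pseudo-directory entry for "a/2/", while the
// trailing slash on the prefix keeps "ab" out of the result.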
func (d *driver) List(ctx context.Context, path string) ([]string, error) { + var files []string + prefix := d.swiftPath(path) if prefix != "" { prefix += "/" } opts := &swift.ObjectsOpts{ - Path: prefix, + Prefix: prefix, Delimiter: '/', } - files, err := d.Conn.ObjectNames(d.Container, opts) - for index, name := range files { - files[index] = "/" + strings.TrimSuffix(name, "/") + objects, err := d.Conn.Objects(d.Container, opts) + for _, obj := range objects { + if !obj.PseudoDirectory { + files = append(files, "/"+strings.TrimSuffix(obj.Name, "/")) + } } return files, parseError(path, err) From 062d6266cf5153bf40dbe01d781cdeace7653aa1 Mon Sep 17 00:00:00 2001 From: davidli Date: Mon, 8 Jun 2015 16:37:11 +0800 Subject: [PATCH 195/501] Add support for Openstack Identity v3 API Signed-off-by: Li Wenquan --- docs/storage/driver/swift/swift.go | 34 ++++++++++++++++++------- docs/storage/driver/swift/swift_test.go | 31 +++++++++++++++------- 2 files changed, 47 insertions(+), 18 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 66c1a85b..0875edef 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -10,6 +10,7 @@ package swift import ( "bytes" + "crypto/tls" "encoding/json" "fmt" "io" @@ -40,14 +41,18 @@ const directoryMimeType = "application/directory" //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { - Username string - Password string - AuthURL string - Tenant string - Region string - Container string - Prefix string - ChunkSize int + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + Region string + Container string + Prefix string + InsecureSkipVerify bool + ChunkSize int } type swiftInfo map[string]interface{} @@ -89,7 +94,8 @@ type Driver struct { // - container func FromParameters(parameters map[string]interface{}) (*Driver, error) { params := DriverParameters{ - ChunkSize: defaultChunkSize, + ChunkSize: defaultChunkSize, + InsecureSkipVerify: false, } if err := mapstructure.Decode(parameters, ¶ms); err != nil { @@ -121,6 +127,12 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // New constructs a new Driver with the given Openstack Swift credentials and container name func New(params DriverParameters) (*Driver, error) { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + MaxIdleConnsPerHost: 2048, + TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, + } + ct := swift.Connection{ UserName: params.Username, ApiKey: params.Password, @@ -128,6 +140,10 @@ func New(params DriverParameters) (*Driver, error) { Region: params.Region, UserAgent: "distribution", Tenant: params.Tenant, + TenantId: params.TenantID, + Domain: params.Domain, + DomainId: params.DomainID, + Transport: transport, ConnectTimeout: 60 * time.Second, Timeout: 15 * 60 * time.Second, } diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 5ead8d15..fc66aa26 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -3,6 +3,7 @@ package swift import ( "io/ioutil" "os" + "strconv" "testing" "github.com/ncw/swift/swifttest" @@ -21,15 +22,19 @@ type SwiftDriverConstructor func(rootDirectory string) (*Driver, error) func init() { var ( - username string - password string - authURL string - tenant string - container string - 
region string - prefix string - swiftServer *swifttest.SwiftServer - err error + username string + password string + authURL string + tenant string + tenantID string + domain string + domainID string + container string + region string + prefix string + insecureSkipVerify bool + swiftServer *swifttest.SwiftServer + err error ) if username = os.Getenv("OS_USERNAME"); username == "" { username = os.Getenv("ST_USER") @@ -41,9 +46,13 @@ func init() { authURL = os.Getenv("ST_AUTH") } tenant = os.Getenv("OS_TENANT_NAME") + tenantID = os.Getenv("OS_TENANT_ID") + domain = os.Getenv("OS_DOMAIN_NAME") + domainID = os.Getenv("OS_DOMAIN_ID") container = os.Getenv("OS_CONTAINER_NAME") region = os.Getenv("OS_REGION_NAME") prefix = os.Getenv("OS_CONTAINER_PREFIX") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("ST_INSECURESKIPVERIFY")) if username == "" || password == "" || authURL == "" || container == "" { if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { @@ -67,9 +76,13 @@ func init() { password, authURL, tenant, + tenantID, + domain, + domainID, region, container, prefix, + insecureSkipVerify, defaultChunkSize, } From a1ae7f712220347308f85d34d5a256aaa331149a Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 19 Jun 2015 15:55:34 +0200 Subject: [PATCH 196/501] Increase default chunk size to 20M Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 0875edef..cd195cc2 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -33,7 +33,7 @@ import ( const driverName = "swift" -const defaultChunkSize = 5 * 1024 * 1024 +const defaultChunkSize = 20 * 1024 * 1024 const minChunkSize = 1 << 20 From 9ab55eae39b544aa3d9383cd315eaa4d7a541339 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 19 Jun 2015 16:44:55 +0200 Subject: [PATCH 197/501] Use only one Swift container for both files and manifests Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 150 ++++++++++++++++------------- 1 file changed, 81 insertions(+), 69 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index cd195cc2..e5f49a95 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -156,10 +156,6 @@ func New(params DriverParameters) (*Driver, error) { return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) } - if err := ct.ContainerCreate(params.Container+"_segments", nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container+"_segments", err) - } - d := &driver{ Conn: ct, Container: params.Container, @@ -231,8 +227,8 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. 
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { var ( segments []swift.Object + multi io.Reader paddingReader io.Reader - bytesRead int64 currentLength int64 cursor int64 ) @@ -240,10 +236,9 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea partNumber := 1 chunkSize := int64(d.ChunkSize) zeroBuf := make([]byte, d.ChunkSize) - segmentsContainer := d.getSegmentsContainer() getSegment := func() string { - return d.swiftPath(path) + "/" + fmt.Sprintf("%016d", partNumber) + return d.swiftSegmentPath(path) + "/" + fmt.Sprintf("%016d", partNumber) } max := func(a int64, b int64) int64 { @@ -258,22 +253,22 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { // Create a object manifest if err := d.createParentFolders(path); err != nil { - return bytesRead, err + return 0, err } manifest, err := d.createManifest(path) if err != nil { - return bytesRead, parseError(path, err) + return 0, parseError(path, err) } manifest.Close() } else { - return bytesRead, parseError(path, err) + return 0, parseError(path, err) } } else { // The manifest already exists. Get all the segments currentLength = info.Bytes - segments, err = d.getAllSegments(segmentsContainer, path) + segments, err = d.getAllSegments(path) if err != nil { - return bytesRead, parseError(path, err) + return 0, parseError(path, err) } } @@ -291,7 +286,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if offset >= currentLength { for offset-currentLength >= chunkSize { // Insert a block a zero - d.Conn.ObjectPut(segmentsContainer, getSegment(), + d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) currentLength += chunkSize @@ -303,26 +298,34 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } else { // Offset is inside the current segment : we need to read the // data from the beginning of the segment to offset - paddingReader, _, err = d.Conn.ObjectOpen(segmentsContainer, getSegment(), false, nil) + file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) + defer file.Close() + paddingReader = file + if err != nil { - return bytesRead, parseError(getSegment(), err) + return 0, parseError(getSegment(), err) } } - multi := io.MultiReader( + multi = io.MultiReader( io.LimitReader(paddingReader, offset-cursor), io.LimitReader(reader, chunkSize-(offset-cursor)), ) - for { - currentSegment, err := d.Conn.ObjectCreate(segmentsContainer, getSegment(), false, "", d.getContentType(), nil) + writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { + currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) if err != nil { - return bytesRead, parseError(path, err) + return false, bytesRead, parseError(path, err) } n, err := io.Copy(currentSegment, multi) if err != nil { - return bytesRead, parseError(path, err) + return false, bytesRead, parseError(path, err) + } + + if n > 0 { + defer currentSegment.Close() + bytesRead += n - max(0, offset-cursor) } if n < chunkSize { @@ -333,25 +336,39 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, 
headers) if err != nil { - return bytesRead, parseError(path, err) + return false, bytesRead, parseError(path, err) } - if _, err := io.Copy(currentSegment, file); err != nil { - return bytesRead, parseError(path, err) + + _, copyErr := io.Copy(currentSegment, file) + + if err := file.Close(); err != nil { + return false, bytesRead, parseError(path, err) + } + + if copyErr != nil { + return false, bytesRead, parseError(path, copyErr) } - file.Close() } - if n > 0 { - currentSegment.Close() - bytesRead += n - max(0, offset-cursor) - } - break + + return true, bytesRead, nil } - currentSegment.Close() - bytesRead += n - max(0, offset-cursor) - multi = io.MultiReader(io.LimitReader(reader, chunkSize)) + multi = io.LimitReader(reader, chunkSize) cursor += chunkSize partNumber++ + + return false, bytesRead, nil + } + + finished := false + read := int64(0) + bytesRead := int64(0) + for finished == false { + finished, read, err = writeSegment(getSegment()) + bytesRead += read + if err != nil { + return bytesRead, err + } } return bytesRead, nil @@ -392,7 +409,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { objects, err := d.Conn.Objects(d.Container, opts) for _, obj := range objects { if !obj.PseudoDirectory { - files = append(files, "/"+strings.TrimSuffix(obj.Name, "/")) + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) } } @@ -425,40 +442,35 @@ func (d *driver) Delete(ctx context.Context, path string) error { return storagedriver.PathNotFoundError{Path: path} } - for index, name := range objects { - objects[index] = name[len(d.Prefix):] - } - - var multiDelete = true if d.BulkDeleteSupport { - _, err := d.Conn.BulkDelete(d.Container, objects) - multiDelete = err != nil + if _, err := d.Conn.BulkDelete(d.Container, objects); err != swift.Forbidden { + return parseError(path, err) + } } - if multiDelete { - for _, name := range objects { - if _, headers, err := d.Conn.Object(d.Container, name); err == nil { - manifest, ok := headers["X-Object-Manifest"] - if ok { - components := strings.SplitN(manifest, "/", 2) - segContainer := components[0] - segments, err := d.getAllSegments(segContainer, components[1]) - if err != nil { - return parseError(name, err) - } - for _, s := range segments { - if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { - return parseError(s.Name, err) - } + for _, name := range objects { + if _, headers, err := d.Conn.Object(d.Container, name); err == nil { + manifest, ok := headers["X-Object-Manifest"] + if ok { + components := strings.SplitN(manifest, "/", 2) + segContainer := components[0] + segments, err := d.getAllSegments(components[1]) + if err != nil { + return parseError(name, err) + } + + for _, s := range segments { + if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { + return parseError(s.Name, err) } } - } else { - return parseError(name, err) } + } else { + return parseError(name, err) + } - if err := d.Conn.ObjectDelete(d.Container, name); err != nil { - return parseError(name, err) - } + if err := d.Conn.ObjectDelete(d.Container, name); err != nil { + return parseError(name, err) } } @@ -472,14 +484,18 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int } func (d *driver) swiftPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.Prefix, "/")+path, "/") + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") +} + +func (d *driver) swiftSegmentPath(path string) string { + return 
strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments"+path, "/"), "/") } func (d *driver) createParentFolders(path string) error { dir := gopath.Dir(path) for dir != "/" { _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), false, "", directoryMimeType, nil) if err != nil { @@ -496,17 +512,13 @@ func (d *driver) getContentType() string { return "application/octet-stream" } -func (d *driver) getSegmentsContainer() string { - return d.Container + "_segments" -} - -func (d *driver) getAllSegments(container string, path string) ([]swift.Object, error) { - return d.Conn.Objects(container, &swift.ObjectsOpts{Prefix: d.swiftPath(path)}) +func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + return d.Conn.Objects(d.Container, &swift.ObjectsOpts{Prefix: d.swiftSegmentPath(path)}) } func (d *driver) createManifest(path string) (*swift.ObjectCreateFile, error) { headers := make(swift.Headers) - headers["X-Object-Manifest"] = d.getSegmentsContainer() + "/" + d.swiftPath(path) + headers["X-Object-Manifest"] = d.Container + "/" + d.swiftSegmentPath(path) return d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) } From d91c4cb6947559ff9c3dff44242d950fd5297b9f Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 19 Jun 2015 16:46:10 +0200 Subject: [PATCH 198/501] Improve 404 errors handling Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index e5f49a95..5c9107e9 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -250,7 +250,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) if err != nil { - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { // Create a object manifest if err := d.createParentFolders(path); err != nil { return 0, err @@ -537,7 +537,7 @@ func detectBulkDelete(authURL string) (bulkDelete bool) { } func parseError(path string, err error) error { - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 404 { + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } From 5cce023aa987a73b1c2e78348f0592397075a454 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Mon, 22 Jun 2015 21:27:49 +0200 Subject: [PATCH 199/501] Do not read segment if no padding is necessary Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 5c9107e9..e0284b9c 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -295,7 +295,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea cursor = currentLength paddingReader = bytes.NewReader(zeroBuf) - } else { + } else if offset-cursor > 0 { // Offset is inside the current segment : we need to read the // data from the beginning of the segment to offset file, _, err := 
d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) @@ -307,10 +307,12 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } } - multi = io.MultiReader( - io.LimitReader(paddingReader, offset-cursor), - io.LimitReader(reader, chunkSize-(offset-cursor)), - ) + readers := []io.Reader{} + if paddingReader != nil { + readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) + } + readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) + multi = io.MultiReader(readers...) writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) From 7b0276dce55e95061bbedab1f2fa325de8e61a63 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 14:17:12 +0200 Subject: [PATCH 200/501] Add code documentation Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index e0284b9c..7de6f8de 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -4,8 +4,18 @@ // This package leverages the ncw/swift client library for interfacing with // Swift. // -// Because Swift is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) +// It supports both TempAuth authentication and Keystone authentication +// (up to version 3). +// +// Since Swift has no concept of directories (directories are an abstration), +// empty objects are created with the MIME type application/vnd.swift.directory. +// +// As Swift has a limit on the size of a single uploaded object (by default +// this is 5GB), the driver makes use of the Swift Large Object Support +// (http://docs.openstack.org/developer/swift/overview_large_objects.html). +// Only one container is used for both manifests and data objects. Manifests +// are stored in the 'files' pseudo directory, data objects are stored under +// 'segments'. package swift import ( @@ -33,8 +43,10 @@ import ( const driverName = "swift" +// defaultChunkSize defines the default size of a segment const defaultChunkSize = 20 * 1024 * 1024 +// minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 const directoryMimeType = "application/directory" @@ -80,8 +92,8 @@ type baseEmbed struct { base.Base } -// Driver is a storagedriver.StorageDriver implementation backed by Amazon Swift -// Objects are stored at absolute keys in the provided bucket. +// Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift +// Objects are stored at absolute keys in the provided container. 
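//
// A sketch of the single-container layout described above, assuming a
// configured prefix of "registry" (object names are illustrative):
//
//	registry/files/docker/...      manifest and content objects
//	registry/segments/docker/...   Dynamic Large Object data segments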
type Driver struct { baseEmbed } From 80bfcb68a87ecfda86e38fc0fc87000cf675e231 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 14:21:03 +0200 Subject: [PATCH 201/501] Change folder mime type to application/vnc.swift.directory Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 7de6f8de..380d65da 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -49,7 +49,8 @@ const defaultChunkSize = 20 * 1024 * 1024 // minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 -const directoryMimeType = "application/directory" +// Vendor MIME type used for objects that act as directories +const directoryMimeType = "application/vnd.swift.directory" //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { From fbc74a6457bedfead409567a4c2dc60e15cd5856 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 14:22:41 +0200 Subject: [PATCH 202/501] Rename DriverParameters structure to Parameters Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 8 ++++---- docs/storage/driver/swift/swift_test.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 380d65da..f91af908 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -52,8 +52,8 @@ const minChunkSize = 1 << 20 // Vendor MIME type used for objects that act as directories const directoryMimeType = "application/vnd.swift.directory" -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { +// Parameters A struct that encapsulates all of the driver parameters after all values have been set +type Parameters struct { Username string Password string AuthURL string @@ -106,7 +106,7 @@ type Driver struct { // - authurl // - container func FromParameters(parameters map[string]interface{}) (*Driver, error) { - params := DriverParameters{ + params := Parameters{ ChunkSize: defaultChunkSize, InsecureSkipVerify: false, } @@ -139,7 +139,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { } // New constructs a new Driver with the given Openstack Swift credentials and container name -func New(params DriverParameters) (*Driver, error) { +func New(params Parameters) (*Driver, error) { transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, MaxIdleConnsPerHost: 2048, diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index fc66aa26..e0bab62e 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -71,7 +71,7 @@ func init() { defer os.Remove(root) swiftDriverConstructor := func(rootDirectory string) (*Driver, error) { - parameters := DriverParameters{ + parameters := Parameters{ username, password, authURL, From 2524f300dcd381cd6cdedf20b001d690924e1500 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 14:23:26 +0200 Subject: [PATCH 203/501] Check file has been opened before closing it Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/swift/swift.go 
b/docs/storage/driver/swift/swift.go index f91af908..dd7238c2 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -312,12 +312,12 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // Offset is inside the current segment : we need to read the // data from the beginning of the segment to offset file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) - defer file.Close() - paddingReader = file - if err != nil { return 0, parseError(getSegment(), err) } + + defer file.Close() + paddingReader = file } readers := []io.Reader{} From 1b28eea2329483f8e381050f2e80e1a50913e1c2 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 14:24:16 +0200 Subject: [PATCH 204/501] Rename environment variables to run Swift testsuite Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift_test.go | 28 ++++++++++--------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index e0bab62e..1e04ab24 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -36,23 +36,17 @@ func init() { swiftServer *swifttest.SwiftServer err error ) - if username = os.Getenv("OS_USERNAME"); username == "" { - username = os.Getenv("ST_USER") - } - if password = os.Getenv("OS_PASSWORD"); password == "" { - password = os.Getenv("ST_KEY") - } - if authURL = os.Getenv("OS_AUTH_URL"); authURL == "" { - authURL = os.Getenv("ST_AUTH") - } - tenant = os.Getenv("OS_TENANT_NAME") - tenantID = os.Getenv("OS_TENANT_ID") - domain = os.Getenv("OS_DOMAIN_NAME") - domainID = os.Getenv("OS_DOMAIN_ID") - container = os.Getenv("OS_CONTAINER_NAME") - region = os.Getenv("OS_REGION_NAME") - prefix = os.Getenv("OS_CONTAINER_PREFIX") - insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("ST_INSECURESKIPVERIFY")) + username = os.Getenv("SWIFT_USERNAME") + password = os.Getenv("SWIFT_PASSWORD") + authURL = os.Getenv("SWIFT_AUTH_URL") + tenant = os.Getenv("SWIFT_TENANT_NAME") + tenantID = os.Getenv("SWIFT_TENANT_ID") + domain = os.Getenv("SWIFT_DOMAIN_NAME") + domainID = os.Getenv("SWIFT_DOMAIN_ID") + container = os.Getenv("SWIFT_CONTAINER_NAME") + region = os.Getenv("SWIFT_REGION_NAME") + prefix = os.Getenv("SWIFT_CONTAINER_PREFIX") + insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) if username == "" || password == "" || authURL == "" || container == "" { if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { From 913fe195fd496e91b5167e19eb33c965d2171493 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 23:09:02 +0200 Subject: [PATCH 205/501] Do not use suite style testing for Swift specific tests Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift_test.go | 81 +++++++++++-------------- 1 file changed, 34 insertions(+), 47 deletions(-) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 1e04ab24..726b5666 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -18,7 +18,7 @@ import ( // Hook up gocheck into the "go test" runner. 
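// After this patch only the shared cross-driver suite still runs through
// gocheck; the driver-specific test below is a plain "testing" function,
// so it can be run on its own (hypothetical invocation):
//
//	go test -run TestEmptyRootList ./docs/storage/driver/swift/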
func Test(t *testing.T) { check.TestingT(t) } -type SwiftDriverConstructor func(rootDirectory string) (*Driver, error) +var swiftDriverConstructor func(prefix string) (*Driver, error) func init() { var ( @@ -31,7 +31,6 @@ func init() { domainID string container string region string - prefix string insecureSkipVerify bool swiftServer *swifttest.SwiftServer err error @@ -45,7 +44,6 @@ func init() { domainID = os.Getenv("SWIFT_DOMAIN_ID") container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") - prefix = os.Getenv("SWIFT_CONTAINER_PREFIX") insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) if username == "" || password == "" || authURL == "" || container == "" { @@ -58,13 +56,13 @@ func init() { container = "test" } - root, err := ioutil.TempDir("", "driver-") + prefix, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } - defer os.Remove(root) + defer os.Remove(prefix) - swiftDriverConstructor := func(rootDirectory string) (*Driver, error) { + swiftDriverConstructor = func(root string) (*Driver, error) { parameters := Parameters{ username, password, @@ -75,7 +73,7 @@ func init() { domainID, region, container, - prefix, + root, insecureSkipVerify, defaultChunkSize, } @@ -83,66 +81,55 @@ func init() { return New(parameters) } - skipCheck := func() string { - return "" - } - driverConstructor := func() (storagedriver.StorageDriver, error) { - return swiftDriverConstructor(root) + return swiftDriverConstructor(prefix) } - testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) - - RegisterSwiftDriverSuite(swiftDriverConstructor, skipCheck, swiftServer) + testsuites.RegisterInProcessSuite(driverConstructor, testsuites.NeverSkip) } -func RegisterSwiftDriverSuite(swiftDriverConstructor SwiftDriverConstructor, skipCheck testsuites.SkipCheck, - swiftServer *swifttest.SwiftServer) { - check.Suite(&SwiftDriverSuite{ - Constructor: swiftDriverConstructor, - SkipCheck: skipCheck, - SwiftServer: swiftServer, - }) -} - -type SwiftDriverSuite struct { - Constructor SwiftDriverConstructor - SwiftServer *swifttest.SwiftServer - testsuites.SkipCheck -} - -func (suite *SwiftDriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } -} - -func (suite *SwiftDriverSuite) TestEmptyRootList(c *check.C) { +func TestEmptyRootList(t *testing.T) { validRoot, err := ioutil.TempDir("", "driver-") - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } defer os.Remove(validRoot) - rootedDriver, err := suite.Constructor(validRoot) - c.Assert(err, check.IsNil) - emptyRootDriver, err := suite.Constructor("") - c.Assert(err, check.IsNil) - slashRootDriver, err := suite.Constructor("/") - c.Assert(err, check.IsNil) + rootedDriver, err := swiftDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := swiftDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := swiftDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } filename := "/test" contents := []byte("contents") ctx := context.Background() err = rootedDriver.PutContent(ctx, filename, contents) - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } defer rootedDriver.Delete(ctx, filename) keys, err := 
emptyRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } } From 01686e2c0754f039e42302251f2e5eff7a51e3e9 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 30 Jun 2015 23:39:04 +0200 Subject: [PATCH 206/501] Show distribution version in User-Agent Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index dd7238c2..c51cc31a 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -39,6 +39,7 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/version" ) const driverName = "swift" @@ -151,7 +152,7 @@ func New(params Parameters) (*Driver, error) { ApiKey: params.Password, AuthUrl: params.AuthURL, Region: params.Region, - UserAgent: "distribution", + UserAgent: "distribution/" + version.Version, Tenant: params.Tenant, TenantId: params.TenantID, Domain: params.Domain, From 91d74a3ee2cafdd0117da4a12a3420309b66bc15 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 2 Jul 2015 10:59:14 +0200 Subject: [PATCH 207/501] Protect against deletion of objects with the same prefix Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 34 +++++++++++++++++++----------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index c51cc31a..9570244a 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -447,32 +447,36 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e // Delete recursively deletes all objects stored at "path" and its subpaths. 
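// Note the trailing slash appended to the listing prefix below: without it,
// deleting "/foo" would also sweep up sibling objects such as "/foobar".
// Restricting the prefix to "path/" confines deletion to true descendants.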
func (d *driver) Delete(ctx context.Context, path string) error { opts := swift.ObjectsOpts{ - Prefix: d.swiftPath(path), + Prefix: d.swiftPath(path) + "/", } - objects, err := d.Conn.ObjectNamesAll(d.Container, &opts) + objects, err := d.Conn.Objects(d.Container, &opts) if err != nil { return parseError(path, err) } - if len(objects) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } if d.BulkDeleteSupport { - if _, err := d.Conn.BulkDelete(d.Container, objects); err != swift.Forbidden { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { return parseError(path, err) } } - for _, name := range objects { - if _, headers, err := d.Conn.Object(d.Container, name); err == nil { + for _, obj := range objects { + if obj.PseudoDirectory { + continue + } + if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { manifest, ok := headers["X-Object-Manifest"] if ok { components := strings.SplitN(manifest, "/", 2) segContainer := components[0] segments, err := d.getAllSegments(components[1]) if err != nil { - return parseError(name, err) + return parseError(obj.Name, err) } for _, s := range segments { @@ -482,14 +486,20 @@ func (d *driver) Delete(ctx context.Context, path string) error { } } } else { - return parseError(name, err) + return parseError(obj.Name, err) } - if err := d.Conn.ObjectDelete(d.Container, name); err != nil { - return parseError(name, err) + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + return parseError(obj.Name, err) } } + if _, err := d.Stat(ctx, path); err == nil { + return parseError(path, d.Conn.ObjectDelete(d.Container, d.swiftPath(path))) + } else if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + return nil } From 7a5aa32a64abf390f92c7dd684c514664f0d9268 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 2 Jul 2015 11:02:47 +0200 Subject: [PATCH 208/501] Use path instead of path/filepath, as the latter may cause trouble on Windows Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 9570244a..30009057 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -27,7 +27,6 @@ import ( "io/ioutil" "net/http" gopath "path" - "path/filepath" "strconv" "strings" "time" @@ -550,7 +549,7 @@ func (d *driver) createManifest(path string) (*swift.ObjectCreateFile, error) { } func detectBulkDelete(authURL string) (bulkDelete bool) { - resp, err := http.Get(filepath.Join(authURL, "..", "..") + "/info") + resp, err := http.Get(gopath.Join(authURL, "..", "..") + "/info") if err == nil { defer resp.Body.Close() decoder := json.NewDecoder(resp.Body) From 0807282859290e813d08c82f37ea4f0d8e100268 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 2 Jul 2015 11:04:10 +0200 Subject: [PATCH 209/501] Use http.StatusRequestedRangeNotSatisfiable instead of error code Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 30009057..2f2e0c60 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -220,7 +220,7 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.
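// A Range request that starts past the end of an object is answered with
// HTTP 416; the named constant below makes that intent explicit, and the
// driver maps it to an empty reader rather than an error.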
file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err != nil { - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == 416 { + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { return ioutil.NopCloser(bytes.NewReader(nil)), nil } From f190aa4a7c7f1f922cc6239215d8cb5255beddcf Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 2 Jul 2015 18:23:34 +0200 Subject: [PATCH 210/501] Refactor segment path concatenation code Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 2f2e0c60..b5f4fcd2 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -251,7 +251,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea zeroBuf := make([]byte, d.ChunkSize) getSegment := func() string { - return d.swiftSegmentPath(path) + "/" + fmt.Sprintf("%016d", partNumber) + return fmt.Sprintf("%s/%016d", d.swiftSegmentPath(path), partNumber) } max := func(a int64, b int64) int64 { From 704e08225447affb2b60c2c52d98657c0a72d5fc Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 2 Jul 2015 18:25:27 +0200 Subject: [PATCH 211/501] Do not create objects for directories Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 65 +++++++++++++----------------- 1 file changed, 29 insertions(+), 36 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index b5f4fcd2..08c79c89 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -49,9 +49,6 @@ const defaultChunkSize = 20 * 1024 * 1024 // minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 -// Vendor MIME type used for objects that act as directories -const directoryMimeType = "application/vnd.swift.directory" - // Parameters A struct that encapsulates all of the driver parameters after all values have been set type Parameters struct { Username string @@ -203,9 +200,6 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if err := d.createParentFolders(path); err != nil { - return err - } err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) return parseError(path, err) @@ -265,9 +259,6 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if err != nil { if err == swift.ContainerNotFound || err == swift.ObjectNotFound { // Create a object manifest - if err := d.createParentFolders(path); err != nil { - return 0, err - } manifest, err := d.createManifest(path) if err != nil { return 0, parseError(path, err) @@ -392,19 +383,40 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. 
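// With directory marker objects gone, Stat is reworked to issue a delimited
// listing: a pseudo-directory entry identifies a directory, while an exact
// object match still requires a follow-up HEAD, since the listing's size
// field cannot be trusted (Swift 1.12 always reports 'bytes' as 0).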
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) + swiftPath := d.swiftPath(path) + opts := &swift.ObjectsOpts{ + Prefix: swiftPath, + Delimiter: '/', + } + + objects, err := d.Conn.ObjectsAll(d.Container, opts) if err != nil { - return nil, parseError(path, err) + return nil, err } fi := storagedriver.FileInfoFields{ - Path: path, - IsDir: info.ContentType == directoryMimeType, - Size: info.Bytes, - ModTime: info.LastModified, + Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), } - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + for _, obj := range objects { + if obj.PseudoDirectory && obj.Name == swiftPath+"/" { + fi.IsDir = true + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } else if obj.Name == swiftPath { + // On Swift 1.12, the 'bytes' field is always 0 + // so we need to do a second HEAD request + info, _, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + return nil, parseError(path, err) + } + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + } + + return nil, storagedriver.PathNotFoundError{Path: path} } // List returns a list of the objects that are direct descendants of the given path. @@ -423,9 +435,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { objects, err := d.Conn.Objects(d.Container, opts) for _, obj := range objects { - if !obj.PseudoDirectory { - files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) - } + files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) } return files, parseError(path, err) @@ -516,23 +526,6 @@ func (d *driver) swiftSegmentPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments"+path, "/"), "/") } -func (d *driver) createParentFolders(path string) error { - dir := gopath.Dir(path) - for dir != "/" { - _, _, err := d.Conn.Object(d.Container, d.swiftPath(dir)) - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { - _, err := d.Conn.ObjectPut(d.Container, d.swiftPath(dir), bytes.NewReader(make([]byte, 0)), - false, "", directoryMimeType, nil) - if err != nil { - return parseError(dir, err) - } - } - dir = gopath.Dir(dir) - } - - return nil -} - func (d *driver) getContentType() string { return "application/octet-stream" } From 661f197f68ab05d292f4cf1bf13f2c96b778cab6 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 2 Jul 2015 18:27:13 +0200 Subject: [PATCH 212/501] Retrieve all the objects using pagination Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 08c79c89..4daf7ccc 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -433,7 +433,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { Delimiter: '/', } - objects, err := d.Conn.Objects(d.Container, opts) + objects, err := d.Conn.ObjectsAll(d.Container, opts) for _, obj := range objects { files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) } @@ -459,7 +459,7 @@ func (d *driver) Delete(ctx context.Context, path string) error { Prefix: d.swiftPath(path) + "/", } - objects, err := 
d.Conn.Objects(d.Container, &opts) + objects, err := d.Conn.ObjectsAll(d.Container, &opts) if err != nil { return parseError(path, err) } @@ -531,7 +531,7 @@ func (d *driver) getContentType() string { } func (d *driver) getAllSegments(path string) ([]swift.Object, error) { - return d.Conn.Objects(d.Container, &swift.ObjectsOpts{Prefix: d.swiftSegmentPath(path)}) + return d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: d.swiftSegmentPath(path)}) } func (d *driver) createManifest(path string) (*swift.ObjectCreateFile, error) { From 000dec3c6f6e92ec20cb86d1375ec82d2f6062b3 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 3 Jul 2015 12:29:54 +0200 Subject: [PATCH 213/501] Inline Swift error handling Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 161 ++++++++++++++++++----------- 1 file changed, 102 insertions(+), 59 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 4daf7ccc..e3c73982 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -192,17 +192,19 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) - if err != nil { - return nil, parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} } return content, nil } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), - contents, d.getContentType()) - return parseError(path, err) + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a @@ -212,16 +214,13 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.
headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - - if err != nil { - if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} } - - return file, nil + if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return file, err } // WriteStream stores the contents of the provided io.Reader at a @@ -257,22 +256,23 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) if err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { // Create a object manifest manifest, err := d.createManifest(path) if err != nil { - return 0, parseError(path, err) + return 0, err } manifest.Close() + } else if err == swift.ContainerNotFound { + return 0, storagedriver.PathNotFoundError{Path: path} } else { - return 0, parseError(path, err) + return 0, err } } else { // The manifest already exists. Get all the segments currentLength = info.Bytes - segments, err = d.getAllSegments(path) - if err != nil { - return 0, parseError(path, err) + if segments, err = d.getAllSegments(path); err != nil { + return 0, err } } @@ -290,9 +290,13 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if offset >= currentLength { for offset-currentLength >= chunkSize { // Insert a block a zero - d.Conn.ObjectPut(d.Container, getSegment(), - bytes.NewReader(zeroBuf), false, "", - d.getContentType(), nil) + _, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) + if err != nil { + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return 0, storagedriver.PathNotFoundError{Path: getSegment()} + } + return 0, err + } currentLength += chunkSize partNumber++ } @@ -304,9 +308,11 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // data from the beginning of the segment to offset file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) if err != nil { - return 0, parseError(getSegment(), err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return 0, storagedriver.PathNotFoundError{Path: getSegment()} + } + return 0, err } - defer file.Close() paddingReader = file } @@ -321,12 +327,15 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) if err != nil { - return false, bytesRead, parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} + } + return false, bytesRead, err } n, err := io.Copy(currentSegment, multi) if err != nil { - return false, bytesRead, parseError(path, err) + return false, bytesRead, err } if n > 0 { @@ -342,17 +351,23 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea 
headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err != nil { - return false, bytesRead, parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err } _, copyErr := io.Copy(currentSegment, file) if err := file.Close(); err != nil { - return false, bytesRead, parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return false, bytesRead, storagedriver.PathNotFoundError{Path: path} + } + return false, bytesRead, err } if copyErr != nil { - return false, bytesRead, parseError(path, copyErr) + return false, bytesRead, copyErr } } @@ -391,6 +406,9 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, objects, err := d.Conn.ObjectsAll(d.Container, opts) if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } return nil, err } @@ -407,7 +425,10 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, // so we need to do a second HEAD request info, _, err := d.Conn.Object(d.Container, swiftPath) if err != nil { - return nil, parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err } fi.IsDir = false fi.Size = info.Bytes @@ -438,19 +459,20 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) } - return files, parseError(path, err) + if err == swift.ContainerNotFound { + return files, storagedriver.PathNotFoundError{Path: path} + } + return files, err } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - err := d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), - d.Container, d.swiftPath(destPath)) - if err != nil { - return parseError(sourcePath, err) + err := d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} } - - return nil + return err } // Delete recursively deletes all objects stored at "path" and its subpaths. 
@@ -461,7 +483,10 @@ func (d *driver) Delete(ctx context.Context, path string) error { objects, err := d.Conn.ObjectsAll(d.Container, &opts) if err != nil { - return parseError(path, err) + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err } if d.BulkDeleteSupport { @@ -470,7 +495,10 @@ func (d *driver) Delete(ctx context.Context, path string) error { filenames[i] = obj.Name } if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { - return parseError(path, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err } } @@ -485,30 +513,46 @@ func (d *driver) Delete(ctx context.Context, path string) error { segContainer := components[0] segments, err := d.getAllSegments(components[1]) if err != nil { - return parseError(obj.Name, err) + return err } for _, s := range segments { if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { - return parseError(s.Name, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: s.Name} + } + return err } } } } else { - return parseError(obj.Name, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err } if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { - return parseError(obj.Name, err) + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err } } - if _, err := d.Stat(ctx, path); err == nil { - return parseError(path, d.Conn.ObjectDelete(d.Container, d.swiftPath(path))) - } else if len(objects) == 0 { + _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { + if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + } else if err == swift.ObjectNotFound && len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } else if err == swift.ContainerNotFound { return storagedriver.PathNotFoundError{Path: path} } - return nil } @@ -531,14 +575,21 @@ func (d *driver) getContentType() string { } func (d *driver) getAllSegments(path string) ([]swift.Object, error) { - return d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: d.swiftSegmentPath(path)}) + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: d.swiftSegmentPath(path)}) + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return segments, err } func (d *driver) createManifest(path string) (*swift.ObjectCreateFile, error) { headers := make(swift.Headers) headers["X-Object-Manifest"] = d.Container + "/" + d.swiftSegmentPath(path) - return d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", - d.getContentType(), headers) + file, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return file, err } func detectBulkDelete(authURL string) (bulkDelete bool) { @@ -553,11 +604,3 @@ func detectBulkDelete(authURL string) (bulkDelete bool) { } return } - -func parseError(path string, err error) error { - if err == swift.ContainerNotFound || err == 
swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} From 52d28ec81a9e826ada36069e6709beb4db64b563 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Wed, 8 Jul 2015 12:59:29 +0200 Subject: [PATCH 214/501] Do not use Swift server side copy for manifests to handle >5G files Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 103 +++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 27 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index e3c73982..ce5df88d 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -20,7 +20,10 @@ package swift import ( "bytes" + "crypto/rand" + "crypto/sha1" "crypto/tls" + "encoding/hex" "encoding/json" "fmt" "io" @@ -237,6 +240,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea paddingReader io.Reader currentLength int64 cursor int64 + segmentPath string ) partNumber := 1 @@ -244,7 +248,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea zeroBuf := make([]byte, d.ChunkSize) getSegment := func() string { - return fmt.Sprintf("%s/%016d", d.swiftSegmentPath(path), partNumber) + return fmt.Sprintf("%s/%016d", segmentPath, partNumber) } max := func(a int64, b int64) int64 { @@ -254,24 +258,36 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea return b } - info, _, err := d.Conn.Object(d.Container, d.swiftPath(path)) - if err != nil { - if err == swift.ObjectNotFound { - // Create a object manifest - manifest, err := d.createManifest(path) - if err != nil { + createManifest := true + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == nil { + manifest, ok := headers["X-Object-Manifest"] + if !ok { + if segmentPath, err = d.swiftSegmentPath(path); err != nil { return 0, err } - manifest.Close() - } else if err == swift.ContainerNotFound { - return 0, storagedriver.PathNotFoundError{Path: path} + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { + return 0, err + } + segments = append(segments, info) } else { + _, segmentPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentPath); err != nil { + return 0, err + } + createManifest = false + } + currentLength = info.Bytes + } else if err == swift.ObjectNotFound { + if segmentPath, err = d.swiftSegmentPath(path); err != nil { return 0, err } } else { - // The manifest already exists. Get all the segments - currentLength = info.Bytes - if segments, err = d.getAllSegments(path); err != nil { + return 0, err + } + + if createManifest { + if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { return 0, err } } @@ -468,8 +484,18 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. 
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - err := d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) + if err == nil { + if manifest, ok := headers["X-Object-Manifest"]; ok { + if err = d.createManifest(destPath, manifest); err != nil { + return err + } + err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) + } else { + err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) + } + } + if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: sourcePath} } return err @@ -509,9 +535,8 @@ func (d *driver) Delete(ctx context.Context, path string) error { if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { manifest, ok := headers["X-Object-Manifest"] if ok { - components := strings.SplitN(manifest, "/", 2) - segContainer := components[0] - segments, err := d.getAllSegments(components[1]) + segContainer, prefix := parseManifest(manifest) + segments, err := d.getAllSegments(prefix) if err != nil { return err } @@ -566,8 +591,14 @@ func (d *driver) swiftPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") } -func (d *driver) swiftSegmentPath(path string) string { - return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments"+path, "/"), "/") +func (d *driver) swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil } func (d *driver) getContentType() string { @@ -575,21 +606,30 @@ func (d *driver) getContentType() string { } func (d *driver) getAllSegments(path string) ([]swift.Object, error) { - segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: d.swiftSegmentPath(path)}) + segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) if err == swift.ContainerNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return segments, err } -func (d *driver) createManifest(path string) (*swift.ObjectCreateFile, error) { +func (d *driver) createManifest(path string, segments string) error { headers := make(swift.Headers) - headers["X-Object-Manifest"] = d.Container + "/" + d.swiftSegmentPath(path) - file, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} + headers["X-Object-Manifest"] = segments + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + if err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err } - return file, err + if err := manifest.Close(); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + return err + } + return nil } func detectBulkDelete(authURL string) (bulkDelete bool) { @@ -604,3 +644,12 @@ func detectBulkDelete(authURL string) (bulkDelete bool) { } return } + +func parseManifest(manifest string) (container string, prefix string) { + components := 
strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} From 81765f8cbb7b5d426b1444937ff22c672a87b217 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Wed, 8 Jul 2015 13:01:34 +0200 Subject: [PATCH 215/501] Catch either missing containers or objects Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 38 ++++++++++++++++-------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index ce5df88d..0921ccc0 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -195,7 +195,7 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return content, nil @@ -204,7 +204,7 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err @@ -217,7 +217,7 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { @@ -308,7 +308,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // Insert a block a zero _, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) if err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return 0, storagedriver.PathNotFoundError{Path: getSegment()} } return 0, err @@ -324,7 +324,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // data from the beginning of the segment to offset file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) if err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return 0, storagedriver.PathNotFoundError{Path: getSegment()} } return 0, err @@ -343,7 +343,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) if err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} } return false, 
bytesRead, err @@ -367,7 +367,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return false, bytesRead, storagedriver.PathNotFoundError{Path: path} } return false, bytesRead, err @@ -376,7 +376,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea _, copyErr := io.Copy(currentSegment, file) if err := file.Close(); err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return false, bytesRead, storagedriver.PathNotFoundError{Path: path} } return false, bytesRead, err @@ -441,7 +441,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, // so we need to do a second HEAD request info, _, err := d.Conn.Object(d.Container, swiftPath) if err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return nil, err @@ -521,7 +521,7 @@ func (d *driver) Delete(ctx context.Context, path string) error { filenames[i] = obj.Name } if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ContainerNotFound { return storagedriver.PathNotFoundError{Path: path} } return err @@ -543,7 +543,7 @@ func (d *driver) Delete(ctx context.Context, path string) error { for _, s := range segments { if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: s.Name} } return err @@ -551,14 +551,14 @@ func (d *driver) Delete(ctx context.Context, path string) error { } } } else { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: obj.Name} } return err } if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: obj.Name} } return err @@ -568,15 +568,17 @@ func (d *driver) Delete(ctx context.Context, path string) error { _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) if err == nil { if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { - if err == swift.ContainerNotFound || err == swift.ObjectNotFound { + if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } - } else if err == swift.ObjectNotFound && len(objects) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } else if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} + } else if err == swift.ObjectNotFound { + if len(objects) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + } else { + return err } return nil } From b2935158b2c8f88ccbf332f9361960eebeb0e979 Mon Sep 17 00:00:00 2001 From: davidli Date: Fri, 17 Jul 2015 14:02:51 +0800 Subject: [PATCH 216/501] Remove IPC support from test file Signed-off-by: Li Wenquan --- 
docs/storage/driver/swift/swift_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 726b5666..6be2238a 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -85,7 +85,7 @@ func init() { return swiftDriverConstructor(prefix) } - testsuites.RegisterInProcessSuite(driverConstructor, testsuites.NeverSkip) + testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) } func TestEmptyRootList(t *testing.T) { From ceb2c7de44405da054be5e391c1ceeb4fb2c7da4 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 21 Jul 2015 17:10:36 -0700 Subject: [PATCH 217/501] Add additional test coverage for the regexp contained in RepositoryNameRegexp This was inspired by problems found with new regexps proposed in PR #690 Signed-off-by: Aaron Lehmann --- docs/api/v2/names_test.go | 142 ++++++++++++++++++++++++++++++++++++- docs/api/v2/routes_test.go | 21 ++++++ 2 files changed, 162 insertions(+), 1 deletion(-) diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index 51e0ba8b..3a017037 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func TestRepositoryNameRegexp(t *testing.T) { +func TestRepositoryComponentNameRegexp(t *testing.T) { for _, testcase := range []struct { input string err error @@ -149,3 +149,143 @@ func TestRepositoryNameRegexp(t *testing.T) { } } } + +func TestRepositoryNameRegexp(t *testing.T) { + for _, testcase := range []struct { + input string + invalid bool + }{ + { + input: "short", + }, + { + input: "simple/name", + }, + { + input: "library/ubuntu", + }, + { + input: "docker/stevvooe/app", + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + }, + { + input: "aa/aa/bb/bb/bb", + }, + { + input: "a/a/a/b/b", + }, + { + input: "a/a/a/a/", + invalid: true, + }, + { + input: "a//a/a", + invalid: true, + }, + { + input: "a", + }, + { + input: "a/aa", + }, + { + input: "aa/a", + }, + { + input: "a/aa/a", + }, + { + input: "foo.com/", + invalid: true, + }, + { + // currently not allowed by the regex + input: "foo.com:8080/bar", + invalid: true, + }, + { + input: "foo.com/bar", + }, + { + input: "foo.com/bar/baz", + }, + { + input: "foo.com/bar/baz/quux", + }, + { + input: "blog.foo.com/bar/baz", + }, + { + input: "asdf", + }, + { + input: "asdf$$^/aa", + invalid: true, + }, + { + input: "aa-a/aa", + }, + { + input: "aa/aa", + }, + { + input: "a-a/a-a", + }, + { + input: "a-/a/a/a", + invalid: true, + }, + { + input: "-foo/bar", + invalid: true, + }, + { + input: "foo/bar-", + invalid: true, + }, + { + input: "foo-/bar", + invalid: true, + }, + { + input: "foo/-bar", + invalid: true, + }, + { + input: "_foo/bar", + invalid: true, + }, + { + input: "foo/bar_", + invalid: true, + }, + { + input: "____/____", + invalid: true, + }, + { + input: "_docker/_docker", + invalid: true, + }, + { + input: "docker_/docker_", + invalid: true, + }, + } { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + matches := RepositoryNameRegexp.FindString(testcase.input) == testcase.input + if matches == testcase.invalid { + if testcase.invalid { + failf("expected invalid repository name %s", testcase.input) + } else { + failf("expected valid repository name %s", testcase.input) + } + } + } +} diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index 9fd29a4f..b8d724df 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -66,6 +66,27 @@ func TestRouter(t *testing.T) { "name": "foo/bar", }, }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar", + }, + }, + { + RouteName: RouteNameTags, + RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", + Vars: map[string]string{ + "name": "docker.com/foo/bar/baz", + }, + }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", From 1d68d81b424ae295bdbca431f8f5419b06c1cd32 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 1 Jun 2015 19:10:51 -0700 Subject: [PATCH 218/501] Catalog V2 API specification proposal This contains a proposal for a catalog API, providing access to the internal contents of a registry instance. The API endpoint is prefixed with an underscore, which is illegal in image names, to prevent collisions with repository names. To avoid issues with large result sets, a paginated version of the API is proposed. We make an addition to the tags API to support pagination to ensure the specification is consistent. Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 135 +++++++++++++++++++++++++++++++++++++ docs/api/v2/routes.go | 1 + 2 files changed, 136 insertions(+) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index f2551ffe..4eec6492 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -87,6 +87,23 @@ var ( Format: "", } + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. If not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + unauthorizedResponse = ResponseDescriptor{ Description: "The client does not have access to the repository.", StatusCode: http.StatusUnauthorized, @@ -269,6 +286,9 @@ type ResponseDescriptor struct { // Headers covers any headers that may be returned from the response. Headers []ParameterDescriptor + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + // ErrorCodes enumerates the error codes that may be returned along with // the response.
ErrorCodes []errcode.ErrorCode @@ -427,6 +447,44 @@ var routeDescriptors = []RouteDescriptor{ }, }, }, + { + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Fields: []ParameterDescriptor{ + { + Name: "next", + Type: "url", + Description: "Provides the URL to get the next set of results, if available.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ], + "next": "?last=&n=" +}`, + }, + }, + }, + }, }, }, }, @@ -1320,6 +1378,83 @@ var routeDescriptors = []RouteDescriptor{ }, }, }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch Complete", + Description: "Request an unabridged list of repositories available.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Fields: []ParameterDescriptor{ + { + Name: "next", + Type: "url", + Description: "Provides the URL to get the next set of results, if available.", + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + }, } var routeDescriptorsMap map[string]RouteDescriptor diff --git a/docs/api/v2/routes.go b/docs/api/v2/routes.go index 69f9d901..d18860f5 100644 --- a/docs/api/v2/routes.go +++ b/docs/api/v2/routes.go @@ -11,6 +11,7 @@ const ( RouteNameBlob = "blob" RouteNameBlobUpload = "blob-upload" RouteNameBlobUploadChunk = "blob-upload-chunk" + RouteNameCatalog = "catalog" ) var allEndpoints = []string{ From 0790a298ed04744b6d65d21f21c17a70cd67c02b Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 2 Jun 2015 20:16:59 -0700 Subject: [PATCH 219/501] Paginate catalog and tag results with Link header Move the specification to use a Link header, rather than a "next" entry in the json results. 
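For example, a paginated catalog response might carry a header such as Link: </v2/_catalog?n=100&last=library%2Fubuntu>; rel="next" (values illustrative; the shape follows the RFC5988 template in the linkHeader descriptor).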
This prevents requiring clients from parsing the request body to issue the next request. It also ensures that the returned response body does not change in between requests. The ordering of the specification has been slightly tweaked, as well. Listing image tags has been moved after the catalog specification. Tag pagination now heavily references catalog pagination. Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 4eec6492..ee895b72 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -87,6 +87,13 @@ var ( Format: "", } + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + paginationParameters = []ParameterDescriptor{ { Name: "n", @@ -462,14 +469,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "Length of the JSON response body.", Format: "", }, - }, - Fields: []ParameterDescriptor{ - { - Name: "next", - Type: "url", - Description: "Provides the URL to get the next set of results, if available.", - Format: "", - }, + linkHeader, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -479,7 +479,6 @@ var routeDescriptors = []RouteDescriptor{ , ... ], - "next": "?last=&n=" }`, }, }, @@ -1439,14 +1438,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "Length of the JSON response body.", Format: "", }, - }, - Fields: []ParameterDescriptor{ - { - Name: "next", - Type: "url", - Description: "Provides the URL to get the next set of results, if available.", - Format: "", - }, + linkHeader, }, }, }, From f3207e76c878e4859018185c4fec9162d327e1e8 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Mon, 13 Jul 2015 13:08:13 -0700 Subject: [PATCH 220/501] Catalog for V2 API Implementation This change adds a basic catalog endpoint to the API, which returns a list, or partial list, of all of the repositories contained in the registry. Calls to this endpoint are somewhat expensive, as every call requires walking a large part of the registry. Instead, to maintain a list of repositories, you would first call the catalog endpoint to get an initial list, and then use the events API to maintain any future repositories. 
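As a rough sketch of the intended client flow (illustrative only: the registry URL and page size are placeholders, error handling is minimal, and a production client should follow the URL carried in the Link header rather than rebuilding it from the last entry as done here):

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/url"
    )

    // fetchAllRepositories pages through /v2/_catalog until the registry
    // stops returning a rel="next" Link header.
    func fetchAllRepositories(registry string) ([]string, error) {
        var all []string
        last := ""
        for {
            u := registry + "/v2/_catalog?n=100"
            if last != "" {
                u += "&last=" + url.QueryEscape(last)
            }
            resp, err := http.Get(u)
            if err != nil {
                return nil, err
            }
            var body struct {
                Repositories []string `json:"repositories"`
            }
            err = json.NewDecoder(resp.Body).Decode(&body)
            resp.Body.Close()
            if err != nil {
                return nil, err
            }
            all = append(all, body.Repositories...)
            // No Link header (or an empty page) means the result set is complete.
            if resp.Header.Get("Link") == "" || len(body.Repositories) == 0 {
                return all, nil
            }
            last = body.Repositories[len(body.Repositories)-1]
        }
    }

    func main() {
        repos, err := fetchAllRepositories("http://localhost:5000")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println(repos)
    }

The same n and last query parameters apply to the paginated tags list described above.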
Signed-off-by: Patrick Devine --- docs/api/v2/routes.go | 1 + docs/api/v2/urls.go | 12 +++ docs/client/repository.go | 68 +++++++++++++++++ docs/client/repository_test.go | 41 ++++++++++ docs/handlers/api_test.go | 136 +++++++++++++++++++++++++++++++++ docs/handlers/app.go | 28 ++++++- docs/handlers/catalog.go | 82 ++++++++++++++++++++ docs/handlers/context.go | 3 + docs/storage/catalog.go | 62 +++++++++++++++ docs/storage/catalog_test.go | 127 ++++++++++++++++++++++++++++++ docs/storage/registry.go | 9 +++ 11 files changed, 568 insertions(+), 1 deletion(-) create mode 100644 docs/handlers/catalog.go create mode 100644 docs/storage/catalog.go create mode 100644 docs/storage/catalog_test.go diff --git a/docs/api/v2/routes.go b/docs/api/v2/routes.go index d18860f5..5b80d5be 100644 --- a/docs/api/v2/routes.go +++ b/docs/api/v2/routes.go @@ -16,6 +16,7 @@ const ( var allEndpoints = []string{ RouteNameManifest, + RouteNameCatalog, RouteNameTags, RouteNameBlob, RouteNameBlobUpload, diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index 60aad565..42974394 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -100,6 +100,18 @@ func (ub *URLBuilder) BuildBaseURL() (string, error) { return baseURL.String(), nil } +// BuildCatalogURL constructs a url get a catalog of repositories +func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameCatalog) + + catalogURL, err := route.URL() + if err != nil { + return "", err + } + + return appendValuesURL(catalogURL, values...).String(), nil +} + // BuildTagsURL constructs a url to list the tags in the named repository. func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { route := ub.cloneRoute(RouteNameTags) diff --git a/docs/client/repository.go b/docs/client/repository.go index fc90cb6e..6d2fd6e7 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -444,3 +444,71 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi return distribution.Descriptor{}, handleErrorResponse(resp) } } + +// NewCatalog can be used to get a list of repositories +func NewCatalog(ctx context.Context, baseURL string, transport http.RoundTripper) (distribution.CatalogService, error) { + ub, err := v2.NewURLBuilderFromString(baseURL) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + } + + return &catalog{ + client: client, + ub: ub, + context: ctx, + }, nil +} + +type catalog struct { + client *http.Client + ub *v2.URLBuilder + context context.Context +} + +func (c *catalog) Get(maxEntries int, last string) ([]string, bool, error) { + var repos []string + + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + u, err := c.ub.BuildCatalogURL(values) + if err != nil { + return nil, false, err + } + + resp, err := c.client.Get(u) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return nil, false, err + } + + repos = ctlg.Repositories + default: + return nil, false, handleErrorResponse(resp) + } + + return repos, false, nil +} diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 3a91be98..e9735cd4 100644 --- 
a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -8,6 +8,7 @@ import ( "log" "net/http" "net/http/httptest" + "strconv" "strings" "testing" "time" @@ -77,6 +78,23 @@ func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.R }) } +func addTestCatalog(content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/_catalog", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + }), + }, + }) +} + func TestBlobFetch(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap @@ -732,3 +750,26 @@ func TestManifestUnauthorized(t *testing.T) { t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) } } + +func TestCatalog(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog([]byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + ctlg, err := NewCatalog(ctx, e, nil) + if err != nil { + t.Fatal(err) + } + + repos, _, err := ctlg.Get(0, "") + if err != nil { + t.Fatal(err) + } + + if len(repos) != 3 { + t.Fatalf("Got wrong number of repos") + } +} diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 8d631941..d768a116 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -60,6 +60,85 @@ func TestCheckAPI(t *testing.T) { } } +func TestCatalogAPI(t *testing.T) { + env := newTestEnv(t) + + values := url.Values{"last": []string{""}, "n": []string{"100"}} + + catalogURL, err := env.builder.BuildCatalogURL(values) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } + + // ----------------------------------- + // try to get an empty catalog + resp, err := http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + var ctlg struct { + Repositories []string `json:"repositories"` + } + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // we haven't pushed anything to the registry yet + if ctlg.Repositories != nil { + t.Fatalf("repositories has unexpected values") + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + + // ----------------------------------- + // push something to the registry and try again + imageName := "foo/bar" + createRepository(env, t, imageName, "sometag") + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != 1 { + t.Fatalf("repositories has unexpected values") + } + + if !contains(ctlg.Repositories, imageName) { + t.Fatalf("didn't find our repository '%s' in the catalog", imageName) + } + + if resp.Header.Get("Link") != "" { + t.Fatalf("repositories has more data when none expected") + } + +} + +func contains(elems []string, e string) bool { + for _, elem := 
range elems { + if elem == e { + return true + } + } + return false +} + func TestURLPrefix(t *testing.T) { config := configuration.Configuration{ Storage: configuration.Storage{ @@ -869,3 +948,60 @@ func checkErr(t *testing.T, err error, msg string) { t.Fatalf("unexpected error %s: %v", msg, err) } } + +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { + unsignedManifest := &manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []manifest.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + signedManifest, err := manifest.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + payload, err := signedManifest.Payload() + checkErr(t, err, "getting manifest payload") + + dgst, err := digest.FromBytes(payload) + checkErr(t, err, "digesting manifest") + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) +} diff --git a/docs/handlers/app.go b/docs/handlers/app.go index c895222b..45f97966 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -69,6 +69,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App return http.HandlerFunc(apiBase) }) app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameCatalog, catalogDispatcher) app.register(v2.RouteNameTags, tagsDispatcher) app.register(v2.RouteNameBlob, blobDispatcher) app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) @@ -366,6 +367,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // Add username to request logging context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + catalog := app.registry.Catalog(context) + context.Catalog = catalog + if app.nameRequired(r) { repository, err := app.registry.Repository(context, getName(context)) @@ -493,6 +497,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont } return fmt.Errorf("forbidden: no repository name") } + accessRecords = appendCatalogAccessRecord(accessRecords, r) } ctx, err := app.accessController.Authorized(context.Context, accessRecords...) @@ -538,7 +543,8 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene // nameRequired returns true if the route requires a name. 
func (app *App) nameRequired(r *http.Request) bool { route := mux.CurrentRoute(r) - return route == nil || route.GetName() != v2.RouteNameBase + routeName := route.GetName() + return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) } // apiBase implements a simple yes-man for doing overall checks against the @@ -588,6 +594,26 @@ func appendAccessRecords(records []auth.Access, method string, repo string) []au return records } +// Add the access record for the catalog if it's our current route +func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { + route := mux.CurrentRoute(r) + routeName := route.GetName() + + if routeName == v2.RouteNameCatalog { + resource := auth.Resource{ + Type: "registry", + Name: "catalog", + } + + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + return accessRecords +} + // applyRegistryMiddleware wraps a registry instance with the configured middlewares func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { for _, mw := range middlewares { diff --git a/docs/handlers/catalog.go b/docs/handlers/catalog.go new file mode 100644 index 00000000..fd2af76e --- /dev/null +++ b/docs/handlers/catalog.go @@ -0,0 +1,82 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/gorilla/handlers" +) + +const maximumReturnedEntries = 100 + +func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { + catalogHandler := &catalogHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(catalogHandler.GetCatalog), + } +} + +type catalogHandler struct { + *Context +} + +type catalogAPIResponse struct { + Repositories []string `json:"repositories"` +} + +func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + lastEntry := q.Get("last") + maxEntries, err := strconv.Atoi(q.Get("n")) + if err != nil || maxEntries < 0 { + maxEntries = maximumReturnedEntries + } + + repos, moreEntries, err := ch.Catalog.Get(maxEntries, lastEntry) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + + // Add a link header if there are more entries to retrieve + if moreEntries { + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, repos) + if err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + w.Header().Set("Link", urlStr) + } + + enc := json.NewEncoder(w) + if err := enc.Encode(catalogAPIResponse{ + Repositories: repos, + }); err != nil { + ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// Use the original URL from the request to create a new URL for +// the link header +func createLinkEntry(origURL string, maxEntries int, repos []string) (string, error) { + calledURL, err := url.Parse(origURL) + if err != nil { + return "", err + } + + calledURL.RawQuery = fmt.Sprintf("n=%d&last=%s", maxEntries, repos[len(repos)-1]) + calledURL.Fragment = "" + urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) + + return urlStr, nil +} diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 85a17123..6625551d 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go 
@@ -32,6 +32,9 @@ type Context struct { urlBuilder *v2.URLBuilder + // Catalog allows getting a complete list of the contents of the registry. + Catalog distribution.CatalogService + // TODO(stevvooe): The goal is too completely factor this context and // dispatching out of the web application. Ideally, we should lean on // context.Context for injection of these resources. diff --git a/docs/storage/catalog.go b/docs/storage/catalog.go new file mode 100644 index 00000000..ce184dba --- /dev/null +++ b/docs/storage/catalog.go @@ -0,0 +1,62 @@ +package storage + +import ( + "path" + "sort" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + storageDriver "github.com/docker/distribution/registry/storage/driver" +) + +type catalogSvc struct { + ctx context.Context + driver storageDriver.StorageDriver +} + +var _ distribution.CatalogService = &catalogSvc{} + +// Get returns a list, or partial list, of repositories in the registry. +// Because it's a quite expensive operation, it should only be used when building up +// an initial set of repositories. +func (c *catalogSvc) Get(maxEntries int, lastEntry string) ([]string, bool, error) { + log.Infof("Retrieving up to %d entries of the catalog starting with '%s'", maxEntries, lastEntry) + var repos []string + + root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + if err != nil { + return repos, false, err + } + + Walk(c.ctx, c.driver, root, func(fileInfo storageDriver.FileInfo) error { + filePath := fileInfo.Path() + + // lop the base path off + repoPath := filePath[len(root)+1:] + + _, file := path.Split(repoPath) + if file == "_layers" { + repoPath = strings.TrimSuffix(repoPath, "/_layers") + if repoPath > lastEntry { + repos = append(repos, repoPath) + } + return ErrSkipDir + } else if strings.HasPrefix(file, "_") { + return ErrSkipDir + } + + return nil + }) + + sort.Strings(repos) + + moreEntries := false + if len(repos) > maxEntries { + moreEntries = true + repos = repos[0:maxEntries] + } + + return repos, moreEntries, nil +} diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go new file mode 100644 index 00000000..8d9f3854 --- /dev/null +++ b/docs/storage/catalog_test.go @@ -0,0 +1,127 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type setupEnv struct { + ctx context.Context + driver driver.StorageDriver + expected []string + registry distribution.Namespace + catalog distribution.CatalogService +} + +func setupFS(t *testing.T) *setupEnv { + d := inmemory.New() + c := []byte("") + ctx := context.Background() + registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider()) + rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) + + repos := []string{ + "/foo/a/_layers/1", + "/foo/b/_layers/2", + "/bar/c/_layers/3", + "/bar/d/_layers/4", + "/foo/d/in/_layers/5", + "/an/invalid/repo", + "/bar/d/_layers/ignored/dir/6", + } + + for _, repo := range repos { + if err := d.PutContent(ctx, rootpath+repo, c); err != nil { + t.Fatalf("Unable to put to inmemory fs") + } + } + + catalog := registry.Catalog(ctx) + + expected := []string{ + "bar/c", + "bar/d", + "foo/a", + "foo/b", + "foo/d/in", + } + + return &setupEnv{ + ctx: ctx, + driver: 
d, + expected: expected, + registry: registry, + catalog: catalog, + } +} + +func TestCatalog(t *testing.T) { + env := setupFS(t) + + repos, more, _ := env.catalog.Get(100, "") + + if !testEq(repos, env.expected) { + t.Errorf("Expected catalog repos err") + } + + if more { + t.Errorf("Catalog has more values which we aren't expecting") + } +} + +func TestCatalogInParts(t *testing.T) { + env := setupFS(t) + + chunkLen := 2 + + repos, more, _ := env.catalog.Get(chunkLen, "") + if !testEq(repos, env.expected[0:chunkLen]) { + t.Errorf("Expected catalog first chunk err") + } + + if !more { + t.Errorf("Expected more values in catalog") + } + + lastRepo := repos[len(repos)-1] + repos, more, _ = env.catalog.Get(chunkLen, lastRepo) + + if !testEq(repos, env.expected[chunkLen:chunkLen*2]) { + t.Errorf("Expected catalog second chunk err") + } + + if !more { + t.Errorf("Expected more values in catalog") + } + + lastRepo = repos[len(repos)-1] + repos, more, _ = env.catalog.Get(chunkLen, lastRepo) + + if !testEq(repos, env.expected[chunkLen*2:chunkLen*3-1]) { + t.Errorf("Expected catalog third chunk err") + } + + if more { + t.Errorf("Catalog has more values which we aren't expecting") + } + +} + +func testEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for count := range a { + if a[count] != b[count] { + return false + } + } + + return true +} diff --git a/docs/storage/registry.go b/docs/storage/registry.go index cf0fe3e7..17035555 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -55,6 +55,15 @@ func (reg *registry) Scope() distribution.Scope { return distribution.GlobalScope } +// Catalog returns an instance of the catalog service which can be +// used to dump all of the repositories in a registry +func (reg *registry) Catalog(ctx context.Context) distribution.CatalogService { + return &catalogSvc{ + ctx: ctx, + driver: reg.blobStore.driver, + } +} + // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. From bf62b7ebb72d4872f438704e27506d18873262ae Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Fri, 17 Jul 2015 11:42:47 -0700 Subject: [PATCH 221/501] Create Repositories method This change removes the Catalog Service and replaces it with a more simplistic Repositories() method for obtaining a catalog of all repositories. The Repositories method takes a pre-allocated slice and fills it up to the size of the slice and returns the amount filled. The catalog is returned lexicographically and will start being filled from the last entry passed to Repositories(). If there are no more entries to fill, io.EOF will be returned. 
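As a sketch of the calling convention described above, a caller refills a fixed slice until io.EOF signals the end of the catalog. The narrow interface below restates the method signature from this patch so the sketch stays self-contained; everything else is illustrative.

package main

import (
	"fmt"
	"io"

	"github.com/docker/distribution/context"
)

// catalogLister is the slice-filling interface introduced by this patch,
// stated locally so the sketch does not guess at the full Namespace.
type catalogLister interface {
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}

// dumpCatalog refills a fixed buffer until io.EOF, resuming each call
// from the last entry returned by the previous one.
func dumpCatalog(ctx context.Context, reg catalogLister) error {
	buf := make([]string, 50)
	last := ""
	for {
		n, err := reg.Repositories(ctx, buf, last)
		for _, repo := range buf[:n] {
			fmt.Println(repo)
		}
		if err == io.EOF {
			return nil // catalog exhausted
		}
		if err != nil {
			return err
		}
		if n > 0 {
			last = buf[n-1]
		}
	}
}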
Signed-off-by: Patrick Devine Conflicts: registry/client/repository.go registry/handlers/api_test.go --- docs/client/repository.go | 75 ++++++++++++++++-------------- docs/client/repository_test.go | 71 ++++++++++++++++++++++------ docs/handlers/api_test.go | 85 ++++++++++++++++++++++++++++++---- docs/handlers/app.go | 3 -- docs/handlers/catalog.go | 25 +++++++--- docs/handlers/context.go | 3 -- docs/storage/catalog.go | 51 ++++++++++---------- docs/storage/catalog_test.go | 61 +++++++++++------------- docs/storage/registry.go | 9 ---- 9 files changed, 246 insertions(+), 137 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 6d2fd6e7..6979cc4d 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -445,34 +445,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } } -// NewCatalog can be used to get a list of repositories -func NewCatalog(ctx context.Context, baseURL string, transport http.RoundTripper) (distribution.CatalogService, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - } - - return &catalog{ - client: client, - ub: ub, - context: ctx, - }, nil -} - -type catalog struct { - client *http.Client - ub *v2.URLBuilder - context context.Context -} - -func (c *catalog) Get(maxEntries int, last string) ([]string, bool, error) { - var repos []string - +func buildCatalogValues(maxEntries int, last string) url.Values { values := url.Values{} if maxEntries > 0 { @@ -483,14 +456,35 @@ func (c *catalog) Get(maxEntries int, last string) ([]string, bool, error) { values.Add("last", last) } - u, err := c.ub.BuildCatalogURL(values) + return values +} + +// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size +// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there +// are no more entries +func Repositories(ctx context.Context, baseURL string, entries []string, last string, transport http.RoundTripper) (int, error) { + var numFilled int + var returnErr error + + ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { - return nil, false, err + return 0, err } - resp, err := c.client.Get(u) + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + } + + values := buildCatalogValues(len(entries), last) + u, err := ub.BuildCatalogURL(values) if err != nil { - return nil, false, err + return 0, err + } + + resp, err := client.Get(u) + if err != nil { + return 0, err } defer resp.Body.Close() @@ -502,13 +496,22 @@ func (c *catalog) Get(maxEntries int, last string) ([]string, bool, error) { decoder := json.NewDecoder(resp.Body) if err := decoder.Decode(&ctlg); err != nil { - return nil, false, err + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF } - repos = ctlg.Repositories default: - return nil, false, handleErrorResponse(resp) + return 0, handleErrorResponse(resp) } - return repos, false, nil + return numFilled, returnErr } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index e9735cd4..b803d754 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "encoding/json" "fmt" + "io" "log" "net/http" "net/http/httptest" @@ -78,19 +79,24 @@ func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.R }) } -func addTestCatalog(content []byte, m *testutil.RequestResponseMap) { +func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { + headers := map[string][]string{ + "Content-Length": {strconv.Itoa(len(content))}, + "Content-Type": {"application/json; charset=utf-8"}, + } + if link != "" { + headers["Link"] = append(headers["Link"], link) + } + *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/_catalog", + Route: route, }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: content, - Headers: http.Header(map[string][]string{ - "Content-Length": {strconv.Itoa(len(content))}, - "Content-Type": {"application/json; charset=utf-8"}, - }), + Headers: http.Header(headers), }, }) } @@ -753,23 +759,58 @@ func TestManifestUnauthorized(t *testing.T) { func TestCatalog(t *testing.T) { var m testutil.RequestResponseMap - addTestCatalog([]byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), &m) + addTestCatalog( + "/v2/_catalog?n=5", + []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) e, c := testServer(m) defer c() + entries := make([]string, 5) + ctx := context.Background() - ctlg, err := NewCatalog(ctx, e, nil) - if err != nil { + numFilled, err := Repositories(ctx, e, entries, "", nil) + if err != io.EOF { t.Fatal(err) } - repos, _, err := ctlg.Get(0, "") - if err != nil { - t.Fatal(err) - } - - if len(repos) != 3 { + if numFilled != 3 { + t.Fatalf("Got wrong number of repos") + } +} + +func TestCatalogInParts(t *testing.T) { + var m testutil.RequestResponseMap + addTestCatalog( + "/v2/_catalog?n=2", + []byte("{\"repositories\":[\"bar\", \"baz\"]}"), + "", &m) + addTestCatalog( + "/v2/_catalog?last=baz&n=2", + []byte("{\"repositories\":[\"foo\"]}"), + "", &m) + + e, c := 
testServer(m) + defer c() + + entries := make([]string, 2) + + ctx := context.Background() + numFilled, err := Repositories(ctx, e, entries, "", nil) + if err != nil { + t.Fatal(err) + } + + if numFilled != 2 { + t.Fatalf("Got wrong number of repos") + } + + numFilled, err = Repositories(ctx, e, entries, "baz", nil) + if err != io.EOF { + t.Fatal(err) + } + + if numFilled != 1 { t.Fatalf("Got wrong number of repos") } } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index d768a116..4473eb99 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -13,6 +13,8 @@ import ( "os" "path" "reflect" + "regexp" + "strconv" "strings" "testing" @@ -60,10 +62,14 @@ func TestCheckAPI(t *testing.T) { } } +// TestCatalogAPI tests the /v2/_catalog endpoint func TestCatalogAPI(t *testing.T) { + chunkLen := 2 env := newTestEnv(t) - values := url.Values{"last": []string{""}, "n": []string{"100"}} + values := url.Values{ + "last": []string{""}, + "n": []string{strconv.Itoa(chunkLen)}} catalogURL, err := env.builder.BuildCatalogURL(values) if err != nil { @@ -90,7 +96,7 @@ func TestCatalogAPI(t *testing.T) { } // we haven't pushed anything to the registry yet - if ctlg.Repositories != nil { + if len(ctlg.Repositories) != 0 { t.Fatalf("repositories has unexpected values") } @@ -100,8 +106,49 @@ func TestCatalogAPI(t *testing.T) { // ----------------------------------- // push something to the registry and try again - imageName := "foo/bar" - createRepository(env, t, imageName, "sometag") + images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} + + for _, image := range images { + createRepository(env, t, image, "sometag") + } + + resp, err = http.Get(catalogURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing catalog api check", resp, http.StatusOK) + + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&ctlg); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if len(ctlg.Repositories) != chunkLen { + t.Fatalf("repositories has unexpected values") + } + + for _, image := range images[:chunkLen] { + if !contains(ctlg.Repositories, image) { + t.Fatalf("didn't find our repository '%s' in the catalog", image) + } + } + + link := resp.Header.Get("Link") + if link == "" { + t.Fatalf("repositories has less data than expected") + } + + newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) + + // ----------------------------------- + // get the last chunk of data + + catalogURL, err = env.builder.BuildCatalogURL(newValues) + if err != nil { + t.Fatalf("unexpected error building catalog url: %v", err) + } resp, err = http.Get(catalogURL) if err != nil { @@ -120,14 +167,36 @@ func TestCatalogAPI(t *testing.T) { t.Fatalf("repositories has unexpected values") } - if !contains(ctlg.Repositories, imageName) { - t.Fatalf("didn't find our repository '%s' in the catalog", imageName) + lastImage := images[len(images)-1] + if !contains(ctlg.Repositories, lastImage) { + t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) } - if resp.Header.Get("Link") != "" { - t.Fatalf("repositories has more data when none expected") + link = resp.Header.Get("Link") + if link != "" { + t.Fatalf("catalog has unexpected data") + } +} + +func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { + re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") + matches := re.FindStringSubmatch(urlStr) + + if len(matches) != 2 
{ + t.Fatalf("Catalog link address response was incorrect") + } + linkURL, _ := url.Parse(matches[1]) + urlValues := linkURL.Query() + + if urlValues.Get("n") != strconv.Itoa(numEntries) { + t.Fatalf("Catalog link entry size is incorrect") } + if urlValues.Get("last") != last { + t.Fatal("Catalog link last entry is incorrect") + } + + return urlValues } func contains(elems []string, e string) bool { diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 45f97966..f61b2c1e 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -367,9 +367,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // Add username to request logging context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) - catalog := app.registry.Catalog(context) - context.Catalog = catalog - if app.nameRequired(r) { repository, err := app.registry.Repository(context, getName(context)) diff --git a/docs/handlers/catalog.go b/docs/handlers/catalog.go index fd2af76e..6ec1fe55 100644 --- a/docs/handlers/catalog.go +++ b/docs/handlers/catalog.go @@ -3,6 +3,7 @@ package handlers import ( "encoding/json" "fmt" + "io" "net/http" "net/url" "strconv" @@ -32,6 +33,8 @@ type catalogAPIResponse struct { } func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { + var moreEntries = true + q := r.URL.Query() lastEntry := q.Get("last") maxEntries, err := strconv.Atoi(q.Get("n")) @@ -39,8 +42,12 @@ func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { maxEntries = maximumReturnedEntries } - repos, moreEntries, err := ch.Catalog.Get(maxEntries, lastEntry) - if err != nil { + repos := make([]string, maxEntries) + + filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) + if err == io.EOF { + moreEntries = false + } else if err != nil { ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -49,7 +56,8 @@ func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { // Add a link header if there are more entries to retrieve if moreEntries { - urlStr, err := createLinkEntry(r.URL.String(), maxEntries, repos) + lastEntry = repos[len(repos)-1] + urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) if err != nil { ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return @@ -59,7 +67,7 @@ func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { enc := json.NewEncoder(w) if err := enc.Encode(catalogAPIResponse{ - Repositories: repos, + Repositories: repos[0:filled], }); err != nil { ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return @@ -68,13 +76,18 @@ func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { // Use the original URL from the request to create a new URL for // the link header -func createLinkEntry(origURL string, maxEntries int, repos []string) (string, error) { +func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { calledURL, err := url.Parse(origURL) if err != nil { return "", err } - calledURL.RawQuery = fmt.Sprintf("n=%d&last=%s", maxEntries, repos[len(repos)-1]) + v := url.Values{} + v.Add("n", strconv.Itoa(maxEntries)) + v.Add("last", lastEntry) + + calledURL.RawQuery = v.Encode() + calledURL.Fragment = "" urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 6625551d..85a17123 100644 --- a/docs/handlers/context.go +++ 
b/docs/handlers/context.go @@ -32,9 +32,6 @@ type Context struct { urlBuilder *v2.URLBuilder - // Catalog allows getting a complete list of the contents of the registry. - Catalog distribution.CatalogService - // TODO(stevvooe): The goal is too completely factor this context and // dispatching out of the web application. Ideally, we should lean on // context.Context for injection of these resources. diff --git a/docs/storage/catalog.go b/docs/storage/catalog.go index ce184dba..470894b7 100644 --- a/docs/storage/catalog.go +++ b/docs/storage/catalog.go @@ -1,36 +1,38 @@ package storage import ( + "errors" + "io" "path" "sort" "strings" - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution" "github.com/docker/distribution/context" - storageDriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver" ) -type catalogSvc struct { - ctx context.Context - driver storageDriver.StorageDriver -} - -var _ distribution.CatalogService = &catalogSvc{} - -// Get returns a list, or partial list, of repositories in the registry. +// Returns a list, or partial list, of repositories in the registry. // Because it's a quite expensive operation, it should only be used when building up // an initial set of repositories. -func (c *catalogSvc) Get(maxEntries int, lastEntry string) ([]string, bool, error) { - log.Infof("Retrieving up to %d entries of the catalog starting with '%s'", maxEntries, lastEntry) - var repos []string +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + var foundRepos []string + var errVal error + + if len(repos) == 0 { + return 0, errors.New("no space in slice") + } root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) if err != nil { - return repos, false, err + return 0, err } - Walk(c.ctx, c.driver, root, func(fileInfo storageDriver.FileInfo) error { + // Walk each of the directories in our storage. Unfortunately since there's no + // guarantee that storage will return files in lexigraphical order, we have + // to store everything another slice, sort it and then copy it back to our + // passed in slice. 
+ + Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() // lop the base path off @@ -39,8 +41,8 @@ func (c *catalogSvc) Get(maxEntries int, lastEntry string) ([]string, bool, erro _, file := path.Split(repoPath) if file == "_layers" { repoPath = strings.TrimSuffix(repoPath, "/_layers") - if repoPath > lastEntry { - repos = append(repos, repoPath) + if repoPath > last { + foundRepos = append(foundRepos, repoPath) } return ErrSkipDir } else if strings.HasPrefix(file, "_") { @@ -50,13 +52,14 @@ func (c *catalogSvc) Get(maxEntries int, lastEntry string) ([]string, bool, erro return nil }) - sort.Strings(repos) + sort.Strings(foundRepos) + n = copy(repos, foundRepos) - moreEntries := false - if len(repos) > maxEntries { - moreEntries = true - repos = repos[0:maxEntries] + // Signal that we have no more entries by setting EOF + if len(foundRepos) <= len(repos) { + errVal = io.EOF } - return repos, moreEntries, nil + return n, errVal + } diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go index 8d9f3854..a9a046a7 100644 --- a/docs/storage/catalog_test.go +++ b/docs/storage/catalog_test.go @@ -1,6 +1,7 @@ package storage import ( + "io" "testing" "github.com/docker/distribution" @@ -15,7 +16,6 @@ type setupEnv struct { driver driver.StorageDriver expected []string registry distribution.Namespace - catalog distribution.CatalogService } func setupFS(t *testing.T) *setupEnv { @@ -41,8 +41,6 @@ func setupFS(t *testing.T) *setupEnv { } } - catalog := registry.Catalog(ctx) - expected := []string{ "bar/c", "bar/d", @@ -56,20 +54,21 @@ func setupFS(t *testing.T) *setupEnv { driver: d, expected: expected, registry: registry, - catalog: catalog, } } func TestCatalog(t *testing.T) { env := setupFS(t) - repos, more, _ := env.catalog.Get(100, "") + p := make([]string, 50) - if !testEq(repos, env.expected) { + numFilled, err := env.registry.Repositories(env.ctx, p, "") + + if !testEq(p, env.expected, numFilled) { t.Errorf("Expected catalog repos err") } - if more { + if err != io.EOF { t.Errorf("Catalog has more values which we aren't expecting") } } @@ -78,50 +77,46 @@ func TestCatalogInParts(t *testing.T) { env := setupFS(t) chunkLen := 2 + p := make([]string, chunkLen) - repos, more, _ := env.catalog.Get(chunkLen, "") - if !testEq(repos, env.expected[0:chunkLen]) { + numFilled, err := env.registry.Repositories(env.ctx, p, "") + if err == io.EOF || numFilled != len(p) { + t.Errorf("Expected more values in catalog") + } + + if !testEq(p, env.expected[0:chunkLen], numFilled) { t.Errorf("Expected catalog first chunk err") } - if !more { + lastRepo := p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) + + if err == io.EOF || numFilled != len(p) { t.Errorf("Expected more values in catalog") } - lastRepo := repos[len(repos)-1] - repos, more, _ = env.catalog.Get(chunkLen, lastRepo) - - if !testEq(repos, env.expected[chunkLen:chunkLen*2]) { + if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { t.Errorf("Expected catalog second chunk err") } - if !more { - t.Errorf("Expected more values in catalog") - } + lastRepo = p[len(p)-1] + numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) - lastRepo = repos[len(repos)-1] - repos, more, _ = env.catalog.Get(chunkLen, lastRepo) - - if !testEq(repos, env.expected[chunkLen*2:chunkLen*3-1]) { - t.Errorf("Expected catalog third chunk err") - } - - if more { + if err != io.EOF { t.Errorf("Catalog has more values which we aren't expecting") } -} - -func 
testEq(a, b []string) bool { - if len(a) != len(b) { - return false + if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) { + t.Errorf("Expected catalog third chunk err") } - for count := range a { - if a[count] != b[count] { +} + +func testEq(a, b []string, size int) bool { + for cnt := 0; cnt < size-1; cnt++ { + if a[cnt] != b[cnt] { return false } } - return true } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 17035555..cf0fe3e7 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -55,15 +55,6 @@ func (reg *registry) Scope() distribution.Scope { return distribution.GlobalScope } -// Catalog returns an instance of the catalog service which can be -// used to dump all of the repositories in a registry -func (reg *registry) Catalog(ctx context.Context) distribution.CatalogService { - return &catalogSvc{ - ctx: ctx, - driver: reg.blobStore.driver, - } -} - // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. From aae59d54ef604e02732b493a73b464f85f8f1005 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Wed, 22 Jul 2015 10:18:36 -0700 Subject: [PATCH 222/501] Make Error.Error() return the post-arg-substitution Message Missed this during the removal of the args property Signed-off-by: Doug Davis --- docs/api/errcode/errors.go | 2 +- docs/api/errcode/errors_test.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index acdeb022..fdaddbcf 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -106,7 +106,7 @@ func (e Error) ErrorCode() ErrorCode { func (e Error) Error() string { return fmt.Sprintf("%s: %s", strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Code.Message()) + e.Message) } // WithDetail will return a new Error, based on the current one, but with diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go index 1f0aaf91..27fb1cec 100644 --- a/docs/api/errcode/errors_test.go +++ b/docs/api/errcode/errors_test.go @@ -126,6 +126,11 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) } + exp1 = "test3: " + exp1 + if e1.Error() != exp1 { + t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) + } + // Test again with a single value this time errs = Errors{ErrorCodeUnknown} expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" From a49594a0e19560969396f5fcbed657062524be8f Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Wed, 22 Jul 2015 15:18:03 -0700 Subject: [PATCH 223/501] Add Registry to client bindings for Repositories The way Repositories() was initially called was somewhat different than other parts of the client bindings because there was no way to instantiate a Namespace. This change implements a NewRegistry() function which changes it so that Repositories() can be called the way one would expect. It doesn't implement any of the other functions of Namespaces. 
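With this change, client code can obtain a catalog listing through the same kind of constructor used for repositories. A usage sketch follows; the registry URL is a placeholder, and the import path assumes the docs/ tree shown here corresponds to registry/ in the source, as the earlier conflict notes suggest.

package main

import (
	"fmt"
	"io"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	// A nil transport falls back to http.DefaultTransport.
	reg, err := client.NewRegistry(ctx, "http://localhost:5000", nil)
	if err != nil {
		panic(err)
	}

	entries := make([]string, 20)
	last := ""
	for {
		n, err := reg.Repositories(ctx, entries, last)
		for _, repo := range entries[:n] {
			fmt.Println(repo)
		}
		if err == io.EOF {
			break // no more pages
		}
		if err != nil {
			panic(err)
		}
		if n > 0 {
			last = entries[n-1]
		}
	}
}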
Signed-off-by: Patrick Devine --- docs/client/repository.go | 134 +++++++++++++++++++-------------- docs/client/repository_test.go | 16 +++- 2 files changed, 90 insertions(+), 60 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 6979cc4d..29effcce 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -21,6 +21,83 @@ import ( "github.com/docker/distribution/registry/storage/cache/memory" ) +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// NewRegistry creates a registry namespace which can be used to get a listing of repositories +func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { + ub, err := v2.NewURLBuilderFromString(baseURL) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + Timeout: 1 * time.Minute, + } + + return ®istry{ + client: client, + ub: ub, + context: ctx, + }, nil +} + +type registry struct { + client *http.Client + ub *v2.URLBuilder + context context.Context +} + +// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size +// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there +// are no more entries +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + + default: + return 0, handleErrorResponse(resp) + } + + return numFilled, returnErr +} + // NewRepository creates a new Repository for the given repository name and base URL func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { if err := v2.ValidateRepositoryName(name); err != nil { @@ -458,60 +535,3 @@ func buildCatalogValues(maxEntries int, last string) url.Values { return values } - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there -// are no more entries -func Repositories(ctx context.Context, baseURL string, entries []string, last string, transport http.RoundTripper) (int, error) { - var numFilled int - var returnErr error - - ub, err := v2.NewURLBuilderFromString(baseURL) - if err != nil { - return 0, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - } - - values := buildCatalogValues(len(entries), last) - u, err := ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - - default: - return 0, handleErrorResponse(resp) - } - - return numFilled, returnErr -} diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index b803d754..232501aa 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -768,8 +768,13 @@ func TestCatalog(t *testing.T) { entries := make([]string, 5) + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() - numFilled, err := Repositories(ctx, e, entries, "", nil) + numFilled, err := r.Repositories(ctx, entries, "") if err != io.EOF { t.Fatal(err) } @@ -795,8 +800,13 @@ func TestCatalogInParts(t *testing.T) { entries := make([]string, 2) + r, err := NewRegistry(context.Background(), e, nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() - numFilled, err := Repositories(ctx, e, entries, "", nil) + numFilled, err := r.Repositories(ctx, entries, "") if err != nil { t.Fatal(err) } @@ -805,7 +815,7 @@ func TestCatalogInParts(t *testing.T) { t.Fatalf("Got wrong number of repos") } - numFilled, err = Repositories(ctx, e, entries, "baz", nil) + numFilled, err = r.Repositories(ctx, entries, "baz") if err != io.EOF { t.Fatal(err) } From 683dc197782ea8f4ea2b5aaef624d6cbc4e637a4 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 22 Jul 2015 18:16:20 -0700 Subject: [PATCH 224/501] Unify the testcases for the two tests in names_test.go Signed-off-by: Aaron Lehmann --- docs/api/v2/names_test.go | 229 +++++++++++++------------------------- 1 file changed, 76 insertions(+), 153 deletions(-) diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index 3a017037..89ab9c61 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -6,10 +6,18 @@ import ( "testing" ) -func TestRepositoryComponentNameRegexp(t *testing.T) { - for _, testcase := range []struct { +var ( + // regexpTestcases is a unified set of testcases for + // TestValidateRepositoryName and TestRepositoryNameRegexp. + // Some of them are valid inputs for one and not the other. 
+ regexpTestcases = []struct { + // input is the repository name or name component testcase input string - err error + // err is the error expected from ValidateRepositoryName, or nil + err error + // invalid should be true if the testcase is *not* expected to + // match RepositoryNameRegexp + invalid bool }{ { input: "", @@ -37,12 +45,14 @@ func TestRepositoryComponentNameRegexp(t *testing.T) { input: "a/a/a/b/b", }, { - input: "a/a/a/a/", - err: ErrRepositoryNameComponentInvalid, + input: "a/a/a/a/", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "a//a/a", - err: ErrRepositoryNameComponentInvalid, + input: "a//a/a", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { input: "a", @@ -56,9 +66,27 @@ func TestRepositoryComponentNameRegexp(t *testing.T) { { input: "a/aa/a", }, + { + input: "foo.com/", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + // TODO: this testcase should be valid once we switch to + // the reference package. + input: "foo.com:8080/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "foo.com/bar", + }, { input: "foo.com/bar/baz", }, + { + input: "foo.com/bar/baz/quux", + }, { input: "blog.foo.com/bar/baz", }, @@ -66,8 +94,9 @@ func TestRepositoryComponentNameRegexp(t *testing.T) { input: "asdf", }, { - input: "asdf$$^/aa", - err: ErrRepositoryNameComponentInvalid, + input: "asdf$$^/aa", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { input: "aa-a/aa", @@ -79,8 +108,9 @@ func TestRepositoryComponentNameRegexp(t *testing.T) { input: "a-a/a-a", }, { - input: "a-/a/a/a", - err: ErrRepositoryNameComponentInvalid, + input: "a-/a/a/a", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { input: strings.Repeat("a", 255), @@ -90,42 +120,57 @@ func TestRepositoryComponentNameRegexp(t *testing.T) { err: ErrRepositoryNameLong, }, { - input: "-foo/bar", - err: ErrRepositoryNameComponentInvalid, + input: "-foo/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "foo/bar-", - err: ErrRepositoryNameComponentInvalid, + input: "foo/bar-", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "foo-/bar", - err: ErrRepositoryNameComponentInvalid, + input: "foo-/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "foo/-bar", - err: ErrRepositoryNameComponentInvalid, + input: "foo/-bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "_foo/bar", - err: ErrRepositoryNameComponentInvalid, + input: "_foo/bar", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "foo/bar_", - err: ErrRepositoryNameComponentInvalid, + input: "foo/bar_", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "____/____", - err: ErrRepositoryNameComponentInvalid, + input: "____/____", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "_docker/_docker", - err: ErrRepositoryNameComponentInvalid, + input: "_docker/_docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, { - input: "docker_/docker_", - err: ErrRepositoryNameComponentInvalid, + input: "docker_/docker_", + err: ErrRepositoryNameComponentInvalid, + invalid: true, }, - } { + } +) + +// TestValidateRepositoryName tests the ValidateRepositoryName function, +// which uses RepositoryNameComponentAnchoredRegexp for validation +func TestValidateRepositoryName(t *testing.T) { + for _, testcase := range regexpTestcases { failf := func(format string, v ...interface{}) { 
t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() @@ -151,129 +196,7 @@ func TestRepositoryComponentNameRegexp(t *testing.T) { } func TestRepositoryNameRegexp(t *testing.T) { - for _, testcase := range []struct { - input string - invalid bool - }{ - { - input: "short", - }, - { - input: "simple/name", - }, - { - input: "library/ubuntu", - }, - { - input: "docker/stevvooe/app", - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - }, - { - input: "aa/aa/bb/bb/bb", - }, - { - input: "a/a/a/b/b", - }, - { - input: "a/a/a/a/", - invalid: true, - }, - { - input: "a//a/a", - invalid: true, - }, - { - input: "a", - }, - { - input: "a/aa", - }, - { - input: "aa/a", - }, - { - input: "a/aa/a", - }, - { - input: "foo.com/", - invalid: true, - }, - { - // currently not allowed by the regex - input: "foo.com:8080/bar", - invalid: true, - }, - { - input: "foo.com/bar", - }, - { - input: "foo.com/bar/baz", - }, - { - input: "foo.com/bar/baz/quux", - }, - { - input: "blog.foo.com/bar/baz", - }, - { - input: "asdf", - }, - { - input: "asdf$$^/aa", - invalid: true, - }, - { - input: "aa-a/aa", - }, - { - input: "aa/aa", - }, - { - input: "a-a/a-a", - }, - { - input: "a-/a/a/a", - invalid: true, - }, - { - input: "-foo/bar", - invalid: true, - }, - { - input: "foo/bar-", - invalid: true, - }, - { - input: "foo-/bar", - invalid: true, - }, - { - input: "foo/-bar", - invalid: true, - }, - { - input: "_foo/bar", - invalid: true, - }, - { - input: "foo/bar_", - invalid: true, - }, - { - input: "____/____", - invalid: true, - }, - { - input: "_docker/_docker", - invalid: true, - }, - { - input: "docker_/docker_", - invalid: true, - }, - } { + for _, testcase := range regexpTestcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() From 153ef32124575a42aab686fa3544cc1bbc235f97 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 22 Jul 2015 20:00:28 -0700 Subject: [PATCH 225/501] Clean up pagination specification Some missing descriptions and error code for tags pagination was cleaned up to ensure clarity. Specifically, we ensure the request variations are named and the proper error codes are included. 
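For clarity, a paginated tags exchange under this cleaned-up specification might look like the following sketch. The repository name and tag values are invented; the Link format is the one defined by the linkHeader descriptor, and the failure modes are the 404 (ErrorCodeNameUnknown) and 401 (ErrorCodeUnauthorized) responses enumerated in the descriptors below.

GET /v2/library/ubuntu/tags/list?n=2&last=14.04 HTTP/1.1
Host: registry.example.com

HTTP/1.1 200 OK
Content-Type: application/json; charset=utf-8
Link: </v2/library/ubuntu/tags/list?n=2&last=15.04>; rel="next"

{"name": "library/ubuntu", "tags": ["14.10", "15.04"]}

When the Link header is absent, the client has received the final portion of the tag list.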
Signed-off-by: Stephen J Day --- docs/api/v2/descriptors.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+)

diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index ee895b72..635cb7f9 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -398,6 +398,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "Fetch the tags under the repository identified by `name`.", Requests: []RequestDescriptor{ { + Name: "Tags", + Description: "Return all tags for the repository", Headers: []ParameterDescriptor{ hostHeader, authHeader, @@ -455,6 +457,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, { + Name: "Tags Paginated", Description: "Return a portion of the tags for the specified repository.", PathParameters: []ParameterDescriptor{nameParameterDescriptor}, QueryParameters: paginationParameters, @@ -483,6 +486,30 @@ var routeDescriptors = []RouteDescriptor{ }, }, }, + Failures: []ResponseDescriptor{ + { + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + }, }, }, },

From 0ec762c0f02cbad9dec96cd27e4ccaa6036da7f5 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Thu, 23 Jul 2015 07:09:48 -0700 Subject: [PATCH 226/501] Remove dead code

Thanks to @tiborvass for noticing.

Signed-off-by: Doug Davis --- docs/client/errors.go | 7 ------- 1 file changed, 7 deletions(-)

diff --git a/docs/client/errors.go b/docs/client/errors.go index 327fea6d..2c168400 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -53,13 +53,6 @@ func handleErrorResponse(resp *http.Response) error { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return v2.ErrorCodeUnauthorized.WithDetail(uErr.Response) - /* - return &errcode.Error{ - Code: v2.ErrorCodeUnauthorized, - Message: v2.ErrorCodeUnauthorized.Message(), - Detail: uErr.Response, - } - */ } return err }

From 4a2300aaa92156ef6388521c2b9eabeae4e3cf08 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Jul 2015 19:39:56 -0700 Subject: [PATCH 227/501] Simplify auth.Challenge interface to SetHeaders

This removes the erroneous http.Handler interface in favor of a simple SetHeaders method that only operates on the response. Several unnecessary uses of pointer types were also fixed up.

Signed-off-by: Stephen J Day --- docs/auth/auth.go | 12 ++++++------ docs/auth/htpasswd/access.go | 10 ++++++---- docs/auth/htpasswd/access_test.go | 2 +- docs/auth/silly/access.go | 7 +++++-- docs/auth/silly/access_test.go | 2 +- docs/auth/token/accesscontroller.go | 20 ++++++++------------ docs/handlers/app.go | 2 +- 7 files changed, 28 insertions(+), 27 deletions(-)

diff --git a/docs/auth/auth.go b/docs/auth/auth.go index 3107537e..7ae2a157 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -61,12 +61,12 @@ type Access struct { // header values based on the error. type Challenge interface { error - // ServeHTTP prepares the request to conduct the appropriate challenge - response by adding the appropriate HTTP challenge header on the response - message.
Callers are expected to set the appropriate HTTP status code - // (e.g. 401) themselves. Because no body is written, users may write a - // custom body after calling ServeHTTP. - ServeHTTP(w http.ResponseWriter, r *http.Request) + + // SetHeaders prepares the request to conduct a challenge response by + // adding the an HTTP challenge header on the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(w http.ResponseWriter) } // AccessController controls access to registry resources based on a request diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go index b8c4d41e..bb153f4b 100644 --- a/docs/auth/htpasswd/access.go +++ b/docs/auth/htpasswd/access.go @@ -87,12 +87,14 @@ type challenge struct { err error } -func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { - header := fmt.Sprintf("Basic realm=%q", ch.realm) - w.Header().Set("WWW-Authenticate", header) +var _ auth.Challenge = challenge{} + +// SetHeaders sets the basic challenge header on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) } -func (ch *challenge) Error() string { +func (ch challenge) Error() string { return fmt.Sprintf("basic authentication challenge: %#v", ch) } diff --git a/docs/auth/htpasswd/access_test.go b/docs/auth/htpasswd/access_test.go index 79e9422c..db040547 100644 --- a/docs/auth/htpasswd/access_test.go +++ b/docs/auth/htpasswd/access_test.go @@ -48,7 +48,7 @@ func TestBasicAccessController(t *testing.T) { if err != nil { switch err := err.(type) { case auth.Challenge: - err.ServeHTTP(w, r) + err.SetHeaders(w) w.WriteHeader(http.StatusUnauthorized) return default: diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go index 7ae43e25..7d6efb07 100644 --- a/docs/auth/silly/access.go +++ b/docs/auth/silly/access.go @@ -75,7 +75,10 @@ type challenge struct { scope string } -func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { +var _ auth.Challenge = challenge{} + +// SetHeaders sets a simple bearer challenge on the response. +func (ch challenge) SetHeaders(w http.ResponseWriter) { header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) if ch.scope != "" { @@ -85,7 +88,7 @@ func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("WWW-Authenticate", header) } -func (ch *challenge) Error() string { +func (ch challenge) Error() string { return fmt.Sprintf("silly authentication challenge: %#v", ch) } diff --git a/docs/auth/silly/access_test.go b/docs/auth/silly/access_test.go index 2fd160de..8b5ecb80 100644 --- a/docs/auth/silly/access_test.go +++ b/docs/auth/silly/access_test.go @@ -21,7 +21,7 @@ func TestSillyAccessController(t *testing.T) { if err != nil { switch err := err.(type) { case auth.Challenge: - err.ServeHTTP(w, r) + err.SetHeaders(w) w.WriteHeader(http.StatusUnauthorized) return default: diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go index c947b67d..0549f8ef 100644 --- a/docs/auth/token/accesscontroller.go +++ b/docs/auth/token/accesscontroller.go @@ -82,20 +82,22 @@ type authChallenge struct { accessSet accessSet } +var _ auth.Challenge = authChallenge{} + // Error returns the internal error string for this authChallenge. 
-func (ac *authChallenge) Error() string { +func (ac authChallenge) Error() string { return ac.err.Error() } // Status returns the HTTP Response Status Code for this authChallenge. -func (ac *authChallenge) Status() int { +func (ac authChallenge) Status() int { return http.StatusUnauthorized } // challengeParams constructs the value to be used in // the WWW-Authenticate response challenge header. // See https://tools.ietf.org/html/rfc6750#section-3 -func (ac *authChallenge) challengeParams() string { +func (ac authChallenge) challengeParams() string { str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) if scope := ac.accessSet.scopeParam(); scope != "" { @@ -111,15 +113,9 @@ func (ac *authChallenge) challengeParams() string { return str } -// SetHeader sets the WWW-Authenticate value for the given header. -func (ac *authChallenge) SetHeader(header http.Header) { - header.Add("WWW-Authenticate", ac.challengeParams()) -} - -// ServeHttp handles writing the challenge response -// by setting the challenge header. -func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ac.SetHeader(w.Header()) +// SetChallenge sets the WWW-Authenticate value for the response. +func (ac authChallenge) SetHeaders(w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams()) } // accessController implements the auth.AccessController interface. diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f61b2c1e..8395ea65 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -502,7 +502,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont switch err := err.(type) { case auth.Challenge: // Add the appropriate WWW-Auth header - err.ServeHTTP(w, r) + err.SetHeaders(w) if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) From e42a8ca5803a036e4259ab66fbac942c00af0733 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Jul 2015 19:48:47 -0700 Subject: [PATCH 228/501] auth.AccessController interface now uses distribution/context Signed-off-by: Stephen J Day --- docs/auth/auth.go | 2 +- docs/auth/htpasswd/access.go | 7 +++---- docs/auth/silly/access.go | 5 ++--- docs/auth/token/accesscontroller.go | 5 ++--- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/docs/auth/auth.go b/docs/auth/auth.go index 7ae2a157..862c8d28 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -34,7 +34,7 @@ import ( "fmt" "net/http" - "golang.org/x/net/context" + "github.com/docker/distribution/context" ) // UserInfo carries information about diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go index bb153f4b..5ac3d84a 100644 --- a/docs/auth/htpasswd/access.go +++ b/docs/auth/htpasswd/access.go @@ -11,9 +11,8 @@ import ( "net/http" "os" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" ) var ( @@ -57,7 +56,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, } func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := ctxu.GetRequest(ctx) + req, err := context.GetRequest(ctx) if err != nil { return nil, err } @@ -71,7 +70,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut } if err := 
ac.htpasswd.authenticateUser(username, password); err != nil { - ctxu.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) return nil, &challenge{ realm: ac.realm, err: ErrAuthenticationFailure, } diff --git a/docs/auth/silly/access.go b/docs/auth/silly/access.go index 7d6efb07..2b801d94 100644 --- a/docs/auth/silly/access.go +++ b/docs/auth/silly/access.go @@ -12,9 +12,8 @@ import ( "net/http" "strings" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" ) // accessController provides a simple implementation of auth.AccessController @@ -44,7 +43,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, // Authorized simply checks for the existence of the authorization header, // responding with a bearer challenge if it doesn't exist. func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := ctxu.GetRequest(ctx) + req, err := context.GetRequest(ctx) if err != nil { return nil, err } diff --git a/docs/auth/token/accesscontroller.go b/docs/auth/token/accesscontroller.go index 0549f8ef..5b1ff7ca 100644 --- a/docs/auth/token/accesscontroller.go +++ b/docs/auth/token/accesscontroller.go @@ -11,10 +11,9 @@ import ( "os" "strings" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" - "golang.org/x/net/context" ) // accessSet maps a typed, named resource to @@ -220,7 +219,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth. accessSet: newAccessSet(accessItems...), } - req, err := ctxu.GetRequest(ctx) + req, err := context.GetRequest(ctx) if err != nil { return nil, err } From 911c0d9f85a965f6b85d4939ea5824cc8915a235 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Jul 2015 20:51:11 -0700 Subject: [PATCH 229/501] Do not replace logger when adding hooks Because the logger was incorrectly replaced while adding hooks, log output did not include the version and instance ids. The main issue was that the logrus.Entry was replaced with the logger, which included no context. Replacing the logger on the context is not necessary when configuring hooks since we are configuring the context's logger directly. Signed-off-by: Stephen J Day --- docs/handlers/app.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f61b2c1e..85b4f70b 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -298,7 +298,14 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { // configureLogHook prepares logging hook parameters.
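The mechanism the fix depends on is that a logrus Entry keeps a pointer to its parent Logger, so hooks can be attached to the shared logger without replacing the entry on the context. A standalone sketch, assuming the Sirupsen/logrus import path this tree uses (printHook stands in for whatever hook types the configuration selects):

package main

import (
	log "github.com/Sirupsen/logrus"
)

// printHook is a stand-in for a configured hook (e.g. a mail hook).
type printHook struct{}

func (printHook) Levels() []log.Level     { return log.AllLevels }
func (printHook) Fire(e *log.Entry) error { return nil }

func main() {
	// The entry carries contextual fields such as version and instance ids.
	entry := log.WithField("instance.id", "example")

	// Attach the hook to the entry's underlying logger; the entry itself,
	// and therefore the context's logger, is left in place.
	entry.Logger.Hooks.Add(printHook{})

	entry.Info("hook configured") // still logged with instance.id attached
}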
func (app *App) configureLogHook(configuration *configuration.Configuration) { - logger := ctxu.GetLogger(app).(*log.Entry).Logger + entry, ok := ctxu.GetLogger(app).(*log.Entry) + if !ok { + // somehow, we are not using logrus + return + } + + logger := entry.Logger + for _, configHook := range configuration.Log.Hooks { if !configHook.Disabled { switch configHook.Type { @@ -318,7 +325,6 @@ func (app *App) configureLogHook(configuration *configuration.Configuration) { } } } - app.Context = ctxu.WithLogger(app.Context, logger) } func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { From 390bb97a889cd3d528b11b01c3fdc2e821844fa0 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 27 May 2015 10:52:22 -0700 Subject: [PATCH 230/501] Manifest and layer soft deletion. Implement the delete API by implementing soft delete for layers and blobs by removing link files and updating the blob descriptor cache. Deletion is configurable - if it is disabled API calls will return an unsupported error. We invalidate the blob descriptor cache by changing the linkedBlobStore's blobStatter to a blobDescriptorService and naming it blobAccessController. Delete() is added throughout the relevant API to support this functionality. Signed-off-by: Richard Scothern --- docs/client/repository.go | 38 +- docs/client/repository_test.go | 35 +- docs/handlers/api_test.go | 333 +++++++++++++++++- docs/handlers/app.go | 16 +- docs/handlers/app_test.go | 2 +- docs/handlers/blob.go | 29 +- docs/handlers/images.go | 36 +- docs/storage/blob_test.go | 87 ++++- docs/storage/blobstore.go | 12 +- docs/storage/blobwriter.go | 5 + .../cache/cachedblobdescriptorstore.go | 27 +- docs/storage/cache/memory/memory.go | 20 ++ docs/storage/cache/redis/redis.go | 45 ++- docs/storage/cache/suite.go | 37 ++ docs/storage/catalog_test.go | 2 +- docs/storage/linkedblobstore.go | 56 ++- docs/storage/manifeststore.go | 4 +- docs/storage/manifeststore_test.go | 71 +++- docs/storage/registry.go | 30 +- docs/storage/revisionstore.go | 17 +- docs/storage/signaturestore.go | 6 +- 21 files changed, 816 insertions(+), 92 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 29effcce..c4b6a2b9 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -354,7 +354,7 @@ func (ms *manifests) Delete(dgst digest.Digest) error { defer resp.Body.Close() switch resp.StatusCode { - case http.StatusOK: + case http.StatusAccepted: return nil default: return handleErrorResponse(resp) @@ -366,7 +366,8 @@ type blobs struct { ub *v2.URLBuilder client *http.Client - statter distribution.BlobStatter + statter distribution.BlobDescriptorService + distribution.BlobDeleter } func sanitizeLocation(location, source string) (string, error) { @@ -484,6 +485,10 @@ func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter panic("not implemented") } +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + type blobStatter struct { name string ub *v2.URLBuilder @@ -535,3 +540,32 @@ func buildCatalogValues(maxEntries int, last string) url.Values { return values } + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case 
http.StatusAccepted: + return nil + default: + return handleErrorResponse(resp) + } +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 232501aa..a7f3e7ce 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -101,6 +101,39 @@ func addTestCatalog(route string, content []byte, link string, m *testutil.Reque }) } +func TestBlobDelete(t *testing.T) { + dgst, _ := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo := "test.example.com/repo1" + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "DELETE", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + err = l.Delete(ctx, dgst) + if err != nil { + t.Errorf("Error deleting blob: %s", err.Error()) + } + +} + func TestBlobFetch(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap @@ -590,7 +623,7 @@ func TestManifestDelete(t *testing.T) { Route: "/v2/" + repo + "/manifests/" + dgst1.String(), }, Response: testutil.Response{ - StatusCode: http.StatusOK, + StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, }), diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 4473eb99..00ab082f 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -33,7 +33,7 @@ import ( // TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified // 200 OK response. func TestCheckAPI(t *testing.T) { - env := newTestEnv(t) + env := newTestEnv(t, false) baseURL, err := env.builder.BuildBaseURL() if err != nil { @@ -65,7 +65,7 @@ func TestCheckAPI(t *testing.T) { // TestCatalogAPI tests the /v2/_catalog endpoint func TestCatalogAPI(t *testing.T) { chunkLen := 2 - env := newTestEnv(t) + env := newTestEnv(t, false) values := url.Values{ "last": []string{""}, @@ -239,18 +239,16 @@ func TestURLPrefix(t *testing.T) { "Content-Type": []string{"application/json; charset=utf-8"}, "Content-Length": []string{"2"}, }) - } +} -// TestBlobAPI conducts a full test of the of the blob api. -func TestBlobAPI(t *testing.T) { - // TODO(stevvooe): This test code is complete junk but it should cover the - // complete flow. This must be broken down and checked against the - // specification *before* we submit the final to docker core. - env := newTestEnv(t) +type blobArgs struct { + imageName string + layerFile io.ReadSeeker + layerDigest digest.Digest + tarSumStr string +} - imageName := "foo/bar" - // "build" our layer file +func makeBlobArgs(t *testing.T) blobArgs { layerFile, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } @@ -258,6 +256,66 @@ func TestBlobAPI(t *testing.T) { layerDigest := digest.Digest(tarSumStr) + args := blobArgs{ + imageName: "foo/bar", + layerFile: layerFile, + layerDigest: layerDigest, + tarSumStr: tarSumStr, + } + return args +} + +// TestBlobAPI conducts a full test of the blob api.
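On the wire, the Clear implementation above amounts to a plain DELETE against the blob URL, with 202 Accepted as the only success status. A rough standalone equivalent using net/http directly (the URL argument is a placeholder):

package example

import (
	"fmt"
	"net/http"
)

// deleteBlob mirrors the request that blobStatter.Clear builds through
// the URL builder, e.g. DELETE /v2/<name>/blobs/<digest>.
func deleteBlob(blobURL string) error {
	req, err := http.NewRequest("DELETE", blobURL, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("unexpected status deleting blob: %v", resp.Status)
	}
	return nil
}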
+func TestBlobAPI(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeBlobArgs(t) + testBlobAPI(t, env, args) + + deleteEnabled = true + env = newTestEnv(t, deleteEnabled) + args = makeBlobArgs(t) + testBlobAPI(t, env, args) + +} + +func TestBlobDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + + args := makeBlobArgs(t) + env = testBlobAPI(t, env, args) + testBlobDelete(t, env, args) +} + +func TestBlobDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeBlobArgs(t) + + imageName := args.imageName + layerDigest := args.layerDigest + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("error building url: %v", err) + } + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting when disabled: %v", err) + } + + checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) +} + +func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { + // TODO(stevvooe): This test code is complete junk but it should cover the + // complete flow. This must be broken down and checked against the + // specification *before* we submit the final to docker core. + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + // ----------------------------------- // Test fetch for non-existent content layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) @@ -372,6 +430,7 @@ func TestBlobAPI(t *testing.T) { uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) + // ------------------------ // Use a head request to see if the layer exists. resp, err = http.Head(layerURL) @@ -459,12 +518,188 @@ func TestBlobAPI(t *testing.T) { // Missing tests: // - Upload the same tarsum file under and different repository and // ensure the content remains uncorrupted. + return env +} + +func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { + // Upload a layer + imageName := args.imageName + layerFile := args.layerFile + layerDigest := args.layerDigest + + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf(err.Error()) + } + // --------------- + // Delete a layer + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Try and get it back + // Use a head request to see if the layer exists. 
+ resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) + + // Delete already deleted layer + resp, err = httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer", resp, http.StatusNotFound) + + // ---------------- + // Attempt to delete a layer with an invalid digest + badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) + resp, err = httpDelete(badURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) + + // ---------------- + // Reupload previously deleted blob + layerFile.Seek(0, os.SEEK_SET) + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + layerFile.Seek(0, os.SEEK_SET) + canonicalDigester := digest.Canonical.New() + if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { + t.Fatalf("error copying to digest: %v", err) + } + canonicalDigest := canonicalDigester.Digest() + + // ------------------------ + // Use a head request to see if it exists + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + "Docker-Content-Digest": []string{canonicalDigest.String()}, + }) +} + +func TestDeleteDisabled(t *testing.T) { + env := newTestEnv(t, false) + + imageName := "foo/bar" + // "build" our layer file + layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + layerDigest := digest.Digest(tarSumStr) + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) +} + +func httpDelete(url string) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + // defer resp.Body.Close() + return resp, err +} + +type manifestArgs struct { + imageName string + signedManifest *manifest.SignedManifest + dgst digest.Digest +} + +func makeManifestArgs(t *testing.T) manifestArgs { + args := manifestArgs{ + imageName: "foo/bar", + } + + return args } func TestManifestAPI(t *testing.T) { - env := newTestEnv(t) + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeManifestArgs(t) + testManifestAPI(t, env, args) - imageName := "foo/bar" + deleteEnabled = true + env = newTestEnv(t, deleteEnabled) + args = makeManifestArgs(t) + testManifestAPI(t, env, args) +} + +func TestManifestDelete(t *testing.T) { + deleteEnabled := true + env := newTestEnv(t, deleteEnabled) + args := makeManifestArgs(t) + env, args = 
testManifestAPI(t, env, args) + testManifestDelete(t, env, args) +} + +func TestManifestDeleteDisabled(t *testing.T) { + deleteEnabled := false + env := newTestEnv(t, deleteEnabled) + args := makeManifestArgs(t) + testManifestDeleteDisabled(t, env, args) +} + +func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) *testEnv { + imageName := args.imageName + manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + resp, err := httpDelete(manifestURL) + if err != nil { + t.Fatalf("unexpected error deleting manifest %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) + return nil +} + +func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, manifestArgs) { + imageName := args.imageName tag := "thetag" manifestURL, err := env.builder.BuildManifestURL(imageName, tag) @@ -567,6 +802,9 @@ func TestManifestAPI(t *testing.T) { dgst, err := digest.FromBytes(payload) checkErr(t, err, "digesting manifest") + args.signedManifest = signedManifest + args.dgst = dgst + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) checkErr(t, err, "building manifest url") @@ -687,6 +925,70 @@ func TestManifestAPI(t *testing.T) { if tagsResponse.Tags[0] != tag { t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) } + + return env, args +} + +func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + dgst := args.dgst + signedManifest := args.signedManifest + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + // --------------- + // Delete by digest + resp, err := httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Attempt to fetch deleted manifest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching deleted manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // --------------- + // Delete already deleted manifest by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "re-deleting manifest by digest") + + checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + + // -------------------- + // Re-upload manifest by digest + resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to fetch re-uploaded deleted digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching re-uploaded manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to delete an unknown manifest + unknownDigest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + unknownManifestDigestURL, err := env.builder.BuildManifestURL(imageName, unknownDigest) + 
checkErr(t, err, "building unknown manifest url") + + resp, err = httpDelete(unknownManifestDigestURL) + checkErr(t, err, "delting unknown manifest by digest") + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + } type testEnv struct { @@ -698,10 +1000,11 @@ type testEnv struct { builder *v2.URLBuilder } -func newTestEnv(t *testing.T) *testEnv { +func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, }, } @@ -1005,7 +1308,7 @@ func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { for _, hv := range resp.Header[k] { if hv != v { - t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v) + t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) } } } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f61b2c1e..2ff8e428 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -106,6 +106,16 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.configureRedis(&configuration) app.configureLogHook(&configuration) + deleteEnabled := false + if d, ok := configuration.Storage["delete"]; ok { + e, ok := d["enabled"] + if ok { + if deleteEnabled, ok = e.(bool); !ok { + deleteEnabled = false + } + } + } + // configure storage caches if cc, ok := configuration.Storage["cache"]; ok { v, ok := cc["blobdescriptor"] @@ -119,10 +129,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis)) + app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled) ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider()) + app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled) ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { @@ -133,7 +143,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.registry == nil { // configure the registry if no cache section is available. 
- app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil) + app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled) } app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 98ecaefd..4fc943d6 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -31,7 +31,7 @@ func TestAppDispatcher(t *testing.T) { Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider()), + registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true), } server := httptest.NewServer(app) router := v2.Router() diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index e33bd3c0..b7c06ea2 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -33,8 +33,9 @@ func blobDispatcher(ctx *Context, r *http.Request) http.Handler { } return handlers.MethodHandler{ - "GET": http.HandlerFunc(blobHandler.GetBlob), - "HEAD": http.HandlerFunc(blobHandler.GetBlob), + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), + "DELETE": http.HandlerFunc(blobHandler.DeleteBlob), } } @@ -66,3 +67,27 @@ func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { return } } + +// DeleteBlob deletes a layer blob +func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { + context.GetLogger(bh).Debug("DeleteBlob") + + blobs := bh.Repository.Blobs(bh) + err := blobs.Delete(bh, bh.Digest) + if err != nil { + switch err { + case distribution.ErrBlobUnknown: + w.WriteHeader(http.StatusNotFound) + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + case distribution.ErrUnsupported: + w.WriteHeader(http.StatusMethodNotAllowed) + bh.Errors = append(bh.Errors, v2.ErrorCodeUnsupported) + default: + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown) + } + return + } + + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusAccepted) +} diff --git a/docs/handlers/images.go b/docs/handlers/images.go index e5b0bc77..68a7f0f0 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -186,16 +186,38 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http w.WriteHeader(http.StatusAccepted) } -// DeleteImageManifest removes the image with the given tag from the registry. +// DeleteImageManifest removes the manifest with the given digest from the registry. func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("DeleteImageManifest") - // TODO(stevvooe): Unfortunately, at this point, manifest deletes are - // unsupported. There are issues with schema version 1 that make removing - // tag index entries a serious problem in eventually consistent storage. - // Once we work out schema version 2, the full deletion system will be - // worked out and we can add support back. 
- imh.Errors = append(imh.Errors, v2.ErrorCodeUnsupported) + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + err = manifests.Delete(imh.Digest) + if err != nil { + switch err { + case digest.ErrDigestUnsupported: + case digest.ErrDigestInvalidFormat: + imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) + return + case distribution.ErrBlobUnknown: + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + w.WriteHeader(http.StatusNotFound) + return + case distribution.ErrUnsupported: + imh.Errors = append(imh.Errors, v2.ErrorCodeUnsupported) + w.WriteHeader(http.StatusMethodNotAllowed) + default: + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) + w.WriteHeader(http.StatusBadRequest) + return + } + } + + w.WriteHeader(http.StatusAccepted) } // digestManifest takes a digest of the given manifest. This belongs somewhere diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 0dbfe810..23cda829 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -21,13 +21,11 @@ import ( // error paths that might be seen during an upload. func TestSimpleBlobUpload(t *testing.T) { randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { t.Fatalf("error creating random reader: %v", err) } dgst := digest.Digest(tarSumStr) - if err != nil { t.Fatalf("error allocating upload store: %v", err) } @@ -35,7 +33,7 @@ func TestSimpleBlobUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -139,6 +137,72 @@ func TestSimpleBlobUpload(t *testing.T) { if digest.NewDigest("sha256", h) != sha256Digest { t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) } + + // Delete a blob + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %s", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + _, err = bs.Open(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected success opening deleted blob for read") + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type getting deleted manifest: %#v", err) + } + + // Re-upload the blob + randomBlob, err := ioutil.ReadAll(randomDataReader) + if err != nil { + t.Fatalf("Error reading all of blob %s", err.Error()) + } + expectedDigest, err := digest.FromBytes(randomBlob) + if err != nil { + t.Fatalf("Error getting digest from bytes: %s", err) + } + simpleUpload(t, bs, randomBlob, expectedDigest) + + d, err = bs.Stat(ctx, expectedDigest) + if err != nil { + t.Errorf("unexpected error stat-ing blob") + } + if d.Digest != expectedDigest { + t.Errorf("Mismatching digest with restored blob") + } + + _, err = bs.Open(ctx, expectedDigest) + if err != nil { + t.Errorf("Unexpected error opening blob") + } + + // Reuse state to test delete with a delete-disabled registry + registry = NewRegistryWithDriver(ctx, 
driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false) + repository, err = registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs = repository.Blobs(ctx) + err = bs.Delete(ctx, desc.Digest) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } } // TestSimpleBlobRead just creates a simple blob file and ensures that basic @@ -148,7 +212,7 @@ func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -252,19 +316,24 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } bs := repository.Blobs(ctx) + simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) +} + +func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { + ctx := context.Background() wr, err := bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting upload: %v", err) } - nn, err := io.Copy(wr, bytes.NewReader([]byte{})) + nn, err := io.Copy(wr, bytes.NewReader(blob)) if err != nil { t.Fatalf("error copying into blob writer: %v", err) } @@ -273,12 +342,12 @@ func TestLayerUploadZeroLength(t *testing.T) { t.Fatalf("unexpected number of bytes copied: %v > 0", nn) } - dgst, err := digest.FromReader(bytes.NewReader([]byte{})) + dgst, err := digest.FromReader(bytes.NewReader(blob)) if err != nil { - t.Fatalf("error getting zero digest: %v", err) + t.Fatalf("error getting digest: %v", err) } - if dgst != digest.DigestSha256EmptyTar { + if dgst != expectedDigest { // sanity check on zero digest t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar) } diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index 484e2106..724617f8 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -7,7 +7,7 @@ import ( "github.com/docker/distribution/registry/storage/driver" ) -// blobStore implements a the read side of the blob store interface over a +// blobStore implements the read side of the blob store interface over a // driver without enforcing per-repository membership. This object is // intentionally a leaky abstraction, providing utility methods that support // creating and traversing backend links. @@ -143,7 +143,7 @@ type blobStatter struct { pm *pathMapper } -var _ distribution.BlobStatter = &blobStatter{} +var _ distribution.BlobDescriptorService = &blobStatter{} // Stat implements BlobStatter.Stat by returning the descriptor for the blob // in the main blob store. 
If this method returns successfully, there is @@ -188,3 +188,11 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi Digest: dgst, }, nil } + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index b39c851e..50da7699 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -70,6 +70,11 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) return distribution.Descriptor{}, err } + err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) + if err != nil { + return distribution.Descriptor{}, err + } + return canonical, nil } diff --git a/docs/storage/cache/cachedblobdescriptorstore.go b/docs/storage/cache/cachedblobdescriptorstore.go index a095b19a..94ca8a90 100644 --- a/docs/storage/cache/cachedblobdescriptorstore.go +++ b/docs/storage/cache/cachedblobdescriptorstore.go @@ -26,13 +26,13 @@ type MetricsTracker interface { type cachedBlobStatter struct { cache distribution.BlobDescriptorService - backend distribution.BlobStatter + backend distribution.BlobDescriptorService tracker MetricsTracker } // NewCachedBlobStatter creates a new statter which prefers a cache and // falls back to a backend. -func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobStatter) distribution.BlobStatter { +func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { return &cachedBlobStatter{ cache: cache, backend: backend, @@ -41,7 +41,7 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist // NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and // falls back to a backend. Hits and misses will send to the tracker. 
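Both the cache and the backend now satisfy the same contract. The shape of that interface, paraphrased here for orientation (see the distribution package for the authoritative definition):

// BlobDescriptorService adds the two mutators this patch threads
// through the caches and linked blob stores.
type BlobDescriptorService interface {
	distribution.BlobStatter // Stat(ctx, dgst) (distribution.Descriptor, error)

	// SetDescriptor associates dgst with the supplied descriptor.
	SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error

	// Clear removes the association, so subsequent Stats miss.
	Clear(ctx context.Context, dgst digest.Digest) error
}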
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobStatter, tracker MetricsTracker) distribution.BlobStatter { +func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { return &cachedBlobStatter{ cache: cache, backend: backend, @@ -77,4 +77,25 @@ fallback: } return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil } diff --git a/docs/storage/cache/memory/memory.go b/docs/storage/cache/memory/memory.go index cdd9abe8..120a6572 100644 --- a/docs/storage/cache/memory/memory.go +++ b/docs/storage/cache/memory/memory.go @@ -44,6 +44,10 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgs return imbdcp.global.Stat(ctx, dgst) } +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { _, err := imbdcp.Stat(ctx, dgst) if err == distribution.ErrBlobUnknown { @@ -80,6 +84,14 @@ func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Co return rsimbdcp.repository.Stat(ctx, dgst) } +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + if rsimbdcp.repository == nil { + return distribution.ErrBlobUnknown + } + + return rsimbdcp.repository.Clear(ctx, dgst) +} + func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if rsimbdcp.repository == nil { // allocate map since we are setting it now. @@ -133,6 +145,14 @@ func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest return desc, nil } +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err diff --git a/docs/storage/cache/redis/redis.go b/docs/storage/cache/redis/redis.go index 64010a09..36370bdd 100644 --- a/docs/storage/cache/redis/redis.go +++ b/docs/storage/cache/redis/redis.go @@ -12,7 +12,7 @@ import ( ) // redisBlobStatService provides an implementation of -// BlobDescriptorCacheProvider based on redis. Blob descritors are stored in +// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in // two parts. The first provide fast access to repository membership through a // redis set for each repo. The second is a redis hash keyed by the digest of // the layer, providing path, length and mediatype information. 
There is also @@ -63,6 +63,27 @@ func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Di return rbds.stat(ctx, conn, dgst) } +func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rbds.pool.Get() + defer conn.Close() + + // Not atomic in redis <= 2.3 + reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") + if err != nil { + return err + } + + if reply == 0 { + return distribution.ErrBlobUnknown + } + + return nil +} + // stat provides an internal stat call that takes a connection parameter. This // allows some internal management of the connection scope. func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { @@ -170,6 +191,28 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Conte return upstream, nil } +// Clear removes the descriptor from the cache and forwards to the upstream descriptor store +func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { + if err := dgst.Validate(); err != nil { + return err + } + + conn := rsrbds.upstream.pool.Get() + defer conn.Close() + + // Check membership to repository first + member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) + if err != nil { + return err + } + + if !member { + return distribution.ErrBlobUnknown + } + + return rsrbds.upstream.Clear(ctx, dgst) +} + func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err diff --git a/docs/storage/cache/suite.go b/docs/storage/cache/suite.go index f74d9f9e..b5a2f643 100644 --- a/docs/storage/cache/suite.go +++ b/docs/storage/cache/suite.go @@ -139,3 +139,40 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) } } + +func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:abc") + expected := distribution.Descriptor{ + Digest: "sha256:abc", + Size: 10, + MediaType: "application/octet-stream"} + + cache, err := provider.RepositoryScoped("foo/bar") + if err != nil { + t.Fatalf("unexpected error getting scoped cache: %v", err) + } + + if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { + t.Fatalf("error setting descriptor: %v", err) + } + + desc, err := cache.Stat(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error statting fake2:abc: %v", err) + } + + if expected != desc { + t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) + } + + err = cache.Clear(ctx, localDigest) + if err != nil { + t.Fatalf("unexpected error deleting descriptor") + } + + nonExistentDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + err = cache.Clear(ctx, nonExistentDigest) + if err == nil { + t.Fatalf("expected error deleting unknown descriptor") + } +} diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go index a9a046a7..aebe6730 100644 --- a/docs/storage/catalog_test.go +++ b/docs/storage/catalog_test.go @@ -22,7 +22,7 @@ func setupFS(t *testing.T) *setupEnv { d := inmemory.New() c := []byte("") ctx :=
context.Background() - registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider()) + registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false) rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) repos := []string{ diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index cb9d9b2b..e7a98bbb 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -16,10 +16,11 @@ import ( // that grant access to the global blob store. type linkedBlobStore struct { *blobStore - blobServer distribution.BlobServer - statter distribution.BlobStatter - repository distribution.Repository - ctx context.Context // only to be used where context can't come through method args + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool // linkPath allows one to control the repository blob link set to which // the blob store dispatches. This is required because manifest and layer @@ -31,7 +32,7 @@ type linkedBlobStore struct { var _ distribution.BlobStore = &linkedBlobStore{} func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return lbs.statter.Stat(ctx, dgst) + return lbs.blobAccessController.Stat(ctx, dgst) } func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { @@ -67,6 +68,10 @@ func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter } func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + dgst, err := digest.FromBytes(p) + if err != nil { + return distribution.Descriptor{}, err + } // Place the data in the blob store first. desc, err := lbs.blobStore.Put(ctx, mediaType, p) if err != nil { @@ -74,6 +79,10 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) return distribution.Descriptor{}, err } + if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { + return distribution.Descriptor{}, err + } + // TODO(stevvooe): Write out mediatype if incoming differs from what is // returned by Put above. Note that we should allow updates for a given // repository. @@ -153,7 +162,26 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution return lbs.newBlobUpload(ctx, id, path, startedAt) } -// newLayerUpload allocates a new upload controller with the given state. +func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + if !lbs.deleteEnabled { + return distribution.ErrUnsupported + } + + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +// newBlobUpload allocates a new upload controller with the given state. 
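Note what the Delete above actually removes: only the repository-scoped link file, while the globally addressed blob data stays in place. Illustrative paths in the v2 storage layout (digests abbreviated; not literal output):

// removed by linkedBlobStatter.Clear (the repository link):
//   /docker/registry/v2/repositories/foo/bar/_layers/sha256/3b1a.../link
// untouched by the soft delete (the shared blob data):
//   /docker/registry/v2/blobs/sha256/3b/3b1a.../data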
func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { fw, err := newFileWriter(ctx, lbs.driver, path) if err != nil { @@ -213,7 +241,7 @@ type linkedBlobStatter struct { linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error) } -var _ distribution.BlobStatter = &linkedBlobStatter{} +var _ distribution.BlobDescriptorService = &linkedBlobStatter{} func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) @@ -246,6 +274,20 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis return lbs.blobStore.statter.Stat(ctx, target) } +func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst) + if err != nil { + return err + } + + return lbs.blobStore.driver.Delete(ctx, blobLinkPath) +} + +func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + // The canonical descriptor for a blob is set at the commit phase of upload + return nil +} + // blobLinkPath provides the path to the blob link, also known as layers. func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { return pm.path(layerLinkPathSpec{name: name, digest: dgst}) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 27d6a9fa..c8c19d43 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -69,8 +69,8 @@ func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { // Delete removes the revision of the specified manfiest. func (ms *manifestStore) Delete(dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete - unsupported") - return fmt.Errorf("deletion of manifests not supported") + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.revisionStore.delete(ms.ctx, dgst) } func (ms *manifestStore) Tags() ([]string, error) { diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 55ea80ac..ca583924 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -29,8 +29,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider()) - + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) repo, err := registry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -156,6 +155,7 @@ func TestManifestStorage(t *testing.T) { } fetchedManifest, err := ms.GetByTag(env.tag) + if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } @@ -296,11 +296,68 @@ func TestManifestStorage(t *testing.T) { } } - // TODO(stevvooe): Currently, deletes are not supported due to some - // complexity around managing tag indexes. We'll add this support back in - // when the manifest format has settled. For now, we expect an error for - // all deletes. 
- if err := ms.Delete(dgst); err == nil { + // Test deleting manifests + err = ms.Delete(dgst) + if err != nil { t.Fatalf("unexpected an error deleting manifest by digest: %v", err) } + + exists, err = ms.Exists(dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if exists { + t.Errorf("Deleted manifest should not exist") + } + + deletedManifest, err := ms.Get(dgst) + if err == nil { + t.Errorf("Unexpected success getting deleted manifest") + } + switch err.(type) { + case distribution.ErrManifestUnknownRevision: + break + default: + t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) + } + + if deletedManifest != nil { + t.Errorf("Deleted manifest get returned non-nil") + } + + // Re-upload should restore manifest to a good state + err = ms.Put(sm) + if err != nil { + t.Errorf("Error re-uploading deleted manifest") + } + + exists, err = ms.Exists(dgst) + if err != nil { + t.Fatalf("Error querying manifest existence") + } + if !exists { + t.Errorf("Restored manifest should exist") + } + + deletedManifest, err = ms.Get(dgst) + if err != nil { + t.Errorf("Unexpected error getting manifest") + } + if deletedManifest == nil { + t.Errorf("Manifest get after re-upload returned nil") + } + + r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false) + repo, err := r.Repository(ctx, env.name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + ms, err = repo.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + err = ms.Delete(dgst) + if err == nil { + t.Errorf("Unexpected success deleting while disabled") + } } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index cf0fe3e7..8bfe0864 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -15,15 +15,16 @@ type registry struct { blobServer distribution.BlobServer statter distribution.BlobStatter // global statter service. blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool } // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is // cheap to allocate.
- var statter distribution.BlobStatter = &blobStatter{ + var statter distribution.BlobDescriptorService = &blobStatter{ driver: driver, pm: defaultPathMapper, } @@ -46,6 +47,7 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv pathFn: bs.path, }, blobDescriptorCacheProvider: blobDescriptorCacheProvider, + deleteEnabled: deleteEnabled, } } @@ -107,10 +109,11 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M ctx: ctx, repository: repo, blobStore: &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - statter: &linkedBlobStatter{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: &linkedBlobStatter{ blobStore: repo.blobStore, repository: repo, linkPath: manifestRevisionLinkPath, @@ -143,7 +146,7 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M // may be context sensitive in the future. The instance should be used similar // to a request local. func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { - var statter distribution.BlobStatter = &linkedBlobStatter{ + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ blobStore: repo.blobStore, repository: repo, linkPath: blobLinkPath, @@ -154,15 +157,16 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { } return &linkedBlobStore{ - blobStore: repo.blobStore, - blobServer: repo.blobServer, - statter: statter, - repository: repo, - ctx: ctx, + blobStore: repo.blobStore, + blobServer: repo.blobServer, + blobAccessController: statter, + repository: repo, + ctx: ctx, // TODO(stevvooe): linkPath limits this blob store to only layers. // This instance cannot be used for manifest checks. - linkPath: blobLinkPath, + linkPath: blobLinkPath, + deleteEnabled: repo.registry.deleteEnabled, } } diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go index 9838bff2..9dea78e8 100644 --- a/docs/storage/revisionstore.go +++ b/docs/storage/revisionstore.go @@ -17,19 +17,6 @@ type revisionStore struct { ctx context.Context } -func newRevisionStore(ctx context.Context, repo *repository, blobStore *blobStore) *revisionStore { - return &revisionStore{ - ctx: ctx, - repository: repo, - blobStore: &linkedBlobStore{ - blobStore: blobStore, - repository: repo, - ctx: ctx, - linkPath: manifestRevisionLinkPath, - }, - } -} - // get retrieves the manifest, keyed by revision digest. func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*manifest.SignedManifest, error) { // Ensure that this revision is available in this repository. @@ -118,3 +105,7 @@ func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) ( return revision, nil } + +func (rs *revisionStore) delete(ctx context.Context, revision digest.Digest) error { + return rs.blobStore.Delete(ctx, revision) +} diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index f6c23e27..78fd2e6c 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -115,8 +115,8 @@ func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { return nil } -// namedBlobStore returns the namedBlobStore of the signatures for the -// manifest with the given digest. Effectively, each singature link path +// linkedBlobStore returns the namedBlobStore of the signatures for the +// manifest with the given digest. 
Effectively, each signature link path // layout is a unique linked blob store. func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) { @@ -131,7 +131,7 @@ func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Di ctx: ctx, repository: s.repository, blobStore: s.blobStore, - statter: &linkedBlobStatter{ + blobAccessController: &linkedBlobStatter{ blobStore: s.blobStore, repository: s.repository, linkPath: linkpath, From 24408263d994e911834fc3eb06f054a9c19332ac Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Fri, 24 Jul 2015 10:42:02 -0700 Subject: [PATCH 231/501] Manifest PUT should return 201 Created Change handler, update descriptors table, regenerate API spec, and update test. Signed-off-by: Aaron Lehmann --- docs/api/v2/descriptors.go | 2 +- docs/handlers/api_test.go | 4 ++-- docs/handlers/images.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index f2551ffe..d6e37d05 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -523,7 +523,7 @@ var routeDescriptors = []RouteDescriptor{ Successes: []ResponseDescriptor{ { Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusAccepted, + StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 8d631941..62a03b62 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -423,7 +423,7 @@ func TestManifestAPI(t *testing.T) { checkErr(t, err, "building manifest url") resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -432,7 +432,7 @@ func TestManifestAPI(t *testing.T) { // -------------------- // Push by digest -- should get same result resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, diff --git a/docs/handlers/images.go b/docs/handlers/images.go index e5b0bc77..61eac69e 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -183,7 +183,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http w.Header().Set("Location", location) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.WriteHeader(http.StatusAccepted) + w.WriteHeader(http.StatusCreated) } // DeleteImageManifest removes the image with the given tag from the registry. 
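For reference, a minimal client-side sketch of the contract the patch above establishes (the package name and the putManifest helper are illustrative, not part of the patch; only the expected status code and response headers come from the change):

    package example

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    // putManifest pushes a manifest payload and checks the result against the
    // behavior introduced above: a successful manifest PUT now answers
    // 201 Created (previously 202 Accepted), with the canonical manifest URL
    // in the Location header and the digest in Docker-Content-Digest.
    func putManifest(manifestURL string, payload []byte) (string, error) {
        req, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(payload))
        if err != nil {
            return "", err
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return "", err
        }
        defer resp.Body.Close()

        if resp.StatusCode != http.StatusCreated {
            return "", fmt.Errorf("unexpected status putting manifest: %s", resp.Status)
        }
        return resp.Header.Get("Docker-Content-Digest"), nil
    }
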
From 345174a34b54a33d687ade2c7f992c68bb0f1d66 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Jul 2015 23:03:13 -0700 Subject: [PATCH 232/501] Etags must be quoted according to http spec Signed-off-by: Stephen J Day --- docs/client/repository.go | 11 ++++++----- docs/client/repository_test.go | 2 +- docs/handlers/api_test.go | 11 ++++++----- docs/handlers/images.go | 4 ++-- docs/storage/blobserver.go | 2 +- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 29effcce..011bc017 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -254,13 +254,14 @@ func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { return ms.GetByTag(dgst.String()) } -// AddEtagToTag allows a client to supply an eTag to GetByTag which will -// be used for a conditional HTTP request. If the eTag matches, a nil -// manifest and nil error will be returned. -func AddEtagToTag(tagName, dgst string) distribution.ManifestServiceOption { +// AddEtagToTag allows a client to supply an eTag to GetByTag which will be +// used for a conditional HTTP request. If the eTag matches, a nil manifest +// and nil error will be returned. etag is automatically quoted when added to +// this map. +func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { return func(ms distribution.ManifestService) error { if ms, ok := ms.(*manifests); ok { - ms.etags[tagName] = dgst + ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) return nil } return fmt.Errorf("etag options is a client-only option") diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 232501aa..31e61864 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -430,7 +430,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Method: "GET", Route: "/v2/" + repo + "/manifests/" + reference, Headers: http.Header(map[string][]string{ - "Etag": {dgst}, + "Etag": {fmt.Sprintf(`"%s"`, dgst)}, }), } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 4473eb99..2c6a6003 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -429,7 +429,7 @@ func TestBlobAPI(t *testing.T) { checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, "Docker-Content-Digest": []string{canonicalDigest.String()}, - "ETag": []string{canonicalDigest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, "Cache-Control": []string{"max-age=31536000"}, }) @@ -440,6 +440,7 @@ func TestBlobAPI(t *testing.T) { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) @@ -597,7 +598,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifest manifest.SignedManifest @@ -619,7 +620,7 @@ func TestManifestAPI(t *testing.T) { checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifestByDigest manifest.SignedManifest @@ -998,12 +999,12 @@ func checkHeaders(t *testing.T, 
resp *http.Response, headers http.Header) { for _, v := range vs { if v == "*" { // Just ensure there is some value. - if len(resp.Header[k]) > 0 { + if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { continue } } - for _, hv := range resp.Header[k] { + for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { if hv != v { t.Fatalf("%v header value not matched in response: %q != %q", k, hv, v) } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index e5b0bc77..c1cae4fc 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -90,13 +90,13 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw))) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.Header().Set("Etag", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) w.Write(sm.Raw) } func etagMatch(r *http.Request, etag string) bool { for _, headerVal := range r.Header["If-None-Match"] { - if headerVal == etag { + if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted return true } } diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go index d0b3204c..36547bcc 100644 --- a/docs/storage/blobserver.go +++ b/docs/storage/blobserver.go @@ -47,7 +47,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h } defer br.Close() - w.Header().Set("ETag", desc.Digest.String()) // If-None-Match handled by ServeContent + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) if w.Header().Get("Docker-Content-Digest") == "" { From 6b4573225c7034a05775953b67e6c6ffa7da5682 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Fri, 24 Jul 2015 16:14:04 -0700 Subject: [PATCH 233/501] Make the registry client more tolerant about HTTP status codes Generally, all 2xx and 3xx codes should be treated as success. 
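To make the intended range concrete, here is a self-contained sketch (SuccessStatus is reproduced from the docs/client/errors.go hunk below; the sample status codes are arbitrary):

    package main

    import "fmt"

    // SuccessStatus mirrors the helper added below in docs/client/errors.go:
    // any HTTP code from 200 through 399, i.e. all 2xx and 3xx responses,
    // counts as success.
    func SuccessStatus(status int) bool {
        return status >= 200 && status <= 399
    }

    func main() {
        for _, code := range []int{200, 202, 204, 301, 307, 399, 400, 404, 500} {
            fmt.Printf("%d -> success=%t\n", code, SuccessStatus(code))
        }
    }
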
Signed-off-by: Aaron Lehmann --- docs/client/auth/session.go | 3 +- docs/client/blob_writer.go | 12 +++--- docs/client/errors.go | 6 +++ docs/client/repository.go | 64 ++++++++++------------------ docs/client/transport/http_reader.go | 7 +-- 5 files changed, 40 insertions(+), 52 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index 27e1d9e3..27a2aa71 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" ) @@ -209,7 +210,7 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token string, err } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { + if !client.SuccessStatus(resp.StatusCode) { return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) } diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 9ebd4183..5f6f01f7 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -44,7 +44,7 @@ func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { return 0, err } - if resp.StatusCode != http.StatusAccepted { + if !SuccessStatus(resp.StatusCode) { return 0, hbu.handleErrorResponse(resp) } @@ -79,7 +79,7 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { return 0, err } - if resp.StatusCode != http.StatusAccepted { + if !SuccessStatus(resp.StatusCode) { return 0, hbu.handleErrorResponse(resp) } @@ -142,7 +142,7 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip } defer resp.Body.Close() - if resp.StatusCode != http.StatusCreated { + if !SuccessStatus(resp.StatusCode) { return distribution.Descriptor{}, hbu.handleErrorResponse(resp) } @@ -160,12 +160,10 @@ func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusNoContent, http.StatusNotFound: + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { return nil - default: - return hbu.handleErrorResponse(resp) } + return hbu.handleErrorResponse(resp) } func (hbu *httpBlobUpload) Close() error { diff --git a/docs/client/errors.go b/docs/client/errors.go index 2c168400..ebd1c36c 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -61,3 +61,9 @@ func handleErrorResponse(resp *http.Response) error { } return &UnexpectedHTTPStatusError{Status: resp.Status} } + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). 
+func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/docs/client/repository.go b/docs/client/repository.go index 50e7b5ce..d0079f09 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -70,8 +70,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: + if SuccessStatus(resp.StatusCode) { var ctlg struct { Repositories []string `json:"repositories"` } @@ -90,8 +89,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri if link == "" { returnErr = io.EOF } - - default: + } else { return 0, handleErrorResponse(resp) } @@ -199,8 +197,7 @@ func (ms *manifests) Tags() ([]string, error) { } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: + if SuccessStatus(resp.StatusCode) { b, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err @@ -214,11 +211,10 @@ func (ms *manifests) Tags() ([]string, error) { } return tagsResponse.Tags, nil - case http.StatusNotFound: + } else if resp.StatusCode == http.StatusNotFound { return nil, nil - default: - return nil, handleErrorResponse(resp) } + return nil, handleErrorResponse(resp) } func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { @@ -238,14 +234,12 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { return false, err } - switch resp.StatusCode { - case http.StatusOK: + if SuccessStatus(resp.StatusCode) { return true, nil - case http.StatusNotFound: + } else if resp.StatusCode == http.StatusNotFound { return false, nil - default: - return false, handleErrorResponse(resp) } + return false, handleErrorResponse(resp) } func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { @@ -294,8 +288,9 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: + if resp.StatusCode == http.StatusNotModified { + return nil, nil + } else if SuccessStatus(resp.StatusCode) { var sm manifest.SignedManifest decoder := json.NewDecoder(resp.Body) @@ -303,11 +298,8 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic return nil, err } return &sm, nil - case http.StatusNotModified: - return nil, nil - default: - return nil, handleErrorResponse(resp) } + return nil, handleErrorResponse(resp) } func (ms *manifests) Put(m *manifest.SignedManifest) error { @@ -329,13 +321,11 @@ func (ms *manifests) Put(m *manifest.SignedManifest) error { } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusAccepted: + if SuccessStatus(resp.StatusCode) { // TODO(dmcgowan): make use of digest header return nil - default: - return handleErrorResponse(resp) } + return handleErrorResponse(resp) } func (ms *manifests) Delete(dgst digest.Digest) error { @@ -354,12 +344,10 @@ func (ms *manifests) Delete(dgst digest.Digest) error { } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusAccepted: + if SuccessStatus(resp.StatusCode) { return nil - default: - return handleErrorResponse(resp) } + return handleErrorResponse(resp) } type blobs struct { @@ -461,8 +449,7 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusAccepted: + if SuccessStatus(resp.StatusCode) { // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := 
sanitizeLocation(resp.Header.Get("Location"), u) @@ -477,9 +464,8 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { startedAt: time.Now(), location: location, }, nil - default: - return nil, handleErrorResponse(resp) } + return nil, handleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -508,8 +494,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: + if SuccessStatus(resp.StatusCode) { lengthHeader := resp.Header.Get("Content-Length") length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { @@ -521,11 +506,10 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi Size: length, Digest: dgst, }, nil - case http.StatusNotFound: + } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown - default: - return distribution.Descriptor{}, handleErrorResponse(resp) } + return distribution.Descriptor{}, handleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -559,12 +543,10 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { } defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusAccepted: + if SuccessStatus(resp.StatusCode) { return nil - default: - return handleErrorResponse(resp) } + return handleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go index e351bdfe..b2e74ddb 100644 --- a/docs/client/transport/http_reader.go +++ b/docs/client/transport/http_reader.go @@ -154,10 +154,11 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { return nil, err } - switch { - case resp.StatusCode == 200: + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { hrs.rc = resp.Body - default: + } else { defer resp.Body.Close() return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } From 29a810b68be7d1f8696019539bb31ec3f9a9dc7f Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 23 Jul 2015 23:16:27 -0700 Subject: [PATCH 234/501] Allow disabling of storage driver redirects Storage drivers can implement a method called URLFor which can return a direct URL for a given path. This functionality allows the registry to direct clients to download content directly from the backend storage. This is commonly used with S3 and CloudFront. Under certain conditions, such as when the registry is not local to the backend, these redirects can hurt performance and waste incoming bandwidth on pulls. This change allows such redirects to be disabled, if required. 
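As a sketch of how the new option would be expressed in the registry's YAML configuration (the exact storage section layout here is an assumption; the handler change below reads storage.redirect.disable as a boolean):

    storage:
      inmemory: {}
      redirect:
        # Assumed key names, per the config parsing in docs/handlers/app.go
        # below: when true, blobs are served through the registry instead of
        # redirecting clients to the backend storage (e.g. S3/CloudFront) URL.
        disable: true
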
Signed-off-by: Stephen J Day Conflicts: configuration/configuration.go registry/handlers/app.go registry/storage/catalog_test.go registry/storage/manifeststore_test.go registry/storage/registry.go --- docs/handlers/app.go | 25 +++++++++++++++++++++---- docs/handlers/app_test.go | 2 +- docs/storage/blob_test.go | 8 ++++---- docs/storage/blobserver.go | 16 +++++++++++----- docs/storage/catalog_test.go | 2 +- docs/storage/manifeststore_test.go | 5 +++-- docs/storage/registry.go | 16 ++++++++++------ 7 files changed, 51 insertions(+), 23 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index fd8f36bb..ab46c032 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -106,7 +106,8 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.configureRedis(&configuration) app.configureLogHook(&configuration) - deleteEnabled := false + // configure deletion + var deleteEnabled bool if d, ok := configuration.Storage["delete"]; ok { e, ok := d["enabled"] if ok { @@ -116,6 +117,22 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App } } + // configure redirects + var redirectDisabled bool + if redirectConfig, ok := configuration.Storage["redirect"]; ok { + v := redirectConfig["disable"] + switch v := v.(type) { + case bool: + redirectDisabled = v + default: + panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) + } + + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } + } + // configure storage caches if cc, ok := configuration.Storage["cache"]; ok { v, ok := cc["blobdescriptor"] @@ -129,10 +146,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled) + app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled) ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled) + app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled) ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { @@ -143,7 +160,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.registry == nil { // configure the registry if no cache section is available. 
- app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled) + app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled) } app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 4fc943d6..84d842e3 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -31,7 +31,7 @@ func TestAppDispatcher(t *testing.T) { Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true), + registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true), } server := httptest.NewServer(app) router := v2.Router() diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 23cda829..7719bab1 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -33,7 +33,7 @@ func TestSimpleBlobUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -193,7 +193,7 @@ func TestSimpleBlobUpload(t *testing.T) { } // Reuse state to test delete with a delete-disabled registry - registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false) + registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true) repository, err = registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -212,7 +212,7 @@ func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -316,7 +316,7 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go index 36547bcc..24aeba69 100644 --- a/docs/storage/blobserver.go +++ b/docs/storage/blobserver.go @@ -17,9 +17,10 @@ const blobCacheControlMaxAge = 365 * 24 * time.Hour // blobServer simply serves blobs from a driver instance using a path function // to identify paths and a descriptor service to fill in metadata. 
type blobServer struct { - driver driver.StorageDriver - statter distribution.BlobStatter - pathFn func(dgst digest.Digest) (string, error) + driver driver.StorageDriver + statter distribution.BlobStatter + pathFn func(dgst digest.Digest) (string, error) + redirect bool // allows disabling URLFor redirects } func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { @@ -37,8 +38,13 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h switch err { case nil: - // Redirect to storage URL. - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + if bs.redirect { + // Redirect to storage URL. + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + return err + } + + fallthrough case driver.ErrUnsupportedMethod: // Fallback to serving the content directly. br, err := newFileReader(ctx, bs.driver, path, desc.Size) diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go index aebe6730..862777aa 100644 --- a/docs/storage/catalog_test.go +++ b/docs/storage/catalog_test.go @@ -22,7 +22,7 @@ func setupFS(t *testing.T) *setupEnv { d := inmemory.New() c := []byte("") ctx := context.Background() - registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false) + registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true) rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) repos := []string{ diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index ca583924..5bbbd4a2 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -29,7 +29,8 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) + repo, err := registry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -347,7 +348,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest get returned non-nil") } - r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false) + r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true) repo, err := r.Repository(ctx, env.name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 8bfe0864..8149be11 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -20,9 +20,12 @@ type registry struct { // NewRegistryWithDriver creates a new registry instance from the provided // driver. The resulting registry may be shared by multiple goroutines but is -// cheap to allocate. -func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool) distribution.Namespace { - +// cheap to allocate. If redirect is true, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. +// +// TODO(stevvooe): This function signature is getting out of hand. Move to +// functional options for instance configuration. 
+func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool) distribution.Namespace { // create global statter, with cache. var statter distribution.BlobDescriptorService = &blobStatter{ driver: driver, @@ -42,9 +45,10 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv return &registry{ blobStore: bs, blobServer: &blobServer{ - driver: driver, - statter: statter, - pathFn: bs.path, + driver: driver, + statter: statter, + pathFn: bs.path, + redirect: redirect, }, blobDescriptorCacheProvider: blobDescriptorCacheProvider, deleteEnabled: deleteEnabled, From 9d73bfe5781bc4433c65618ac41d1b1157a1950e Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 28 Jul 2015 10:59:11 -0700 Subject: [PATCH 235/501] Fix for api_test.go This passed in #744 before merge, but apparently the test changed since the PR was created, in ways that led to new failures. Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 4b16127f..c484835f 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -963,7 +963,7 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { // -------------------- // Re-upload manifest by digest resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -1372,7 +1372,7 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) checkErr(t, err, "building manifest url") resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusAccepted) + checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, From e83af616d6b6f4f81bfd8131a9d843c445857ac3 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 29 Jul 2015 12:50:43 -0700 Subject: [PATCH 236/501] Automatically generate an HTTP secret if none is provided Log a warning if the registry generates its own secret. Update configuration doc, and remove the default secret from the development config file. Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index fd8f36bb..9fb82cbb 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -1,6 +1,7 @@ package handlers import ( + cryptorand "crypto/rand" "expvar" "fmt" "math/rand" @@ -30,6 +31,10 @@ import ( "golang.org/x/net/context" ) +// randomSecretSize is the number of random bytes to generate if no secret +// was specified. +const randomSecretSize = 32 + // App is a global registry application object. Shared resources can be placed // on this object that will be accessible from all requests. Any writable // fields should be protected. 
@@ -102,6 +107,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App panic(err) } + app.configureSecret(&configuration) app.configureEvents(&configuration) app.configureRedis(&configuration) app.configureLogHook(&configuration) @@ -337,6 +343,19 @@ func (app *App) configureLogHook(configuration *configuration.Configuration) { } } +// configureSecret creates a random secret if a secret wasn't included in the +// configuration. +func (app *App) configureSecret(configuration *configuration.Configuration) { + if configuration.HTTP.Secret == "" { + var secretBytes [randomSecretSize]byte + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) + } + configuration.HTTP.Secret = string(secretBytes[:]) + ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + } +} + func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. From fd404e78500fbc1ccc68d2476bf0053e6aaab21e Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 29 Jul 2015 16:52:47 -0700 Subject: [PATCH 237/501] When a request completes, ensure only one log line is printed which includes the HTTP response. When debugging non-successful registry requests, this places the error details and HTTP status fields in the same log line, giving easier visibility into what error occurred in the request. Signed-off-by: Richard Scothern --- docs/handlers/app.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index fd8f36bb..12f456fc 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -343,8 +343,12 @@ func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Instantiate an http context here so we can track the error codes // returned by the request router. ctx := defaultContextManager.context(app, w, r) + defer func() { - ctxu.GetResponseLogger(ctx).Infof("response completed") + status, ok := ctx.Value("http.response.status").(int) + if ok && status >= 200 && status <= 399 { + ctxu.GetResponseLogger(ctx).Infof("response completed") + } }() defer defaultContextManager.release(ctx) @@ -424,11 +428,11 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // own errors if they need different behavior (such as range errors // for layer upload). 
if context.Errors.Len() > 0 { - app.logError(context, context.Errors) - if err := errcode.ServeJSON(w, context.Errors); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } + + app.logError(context, context.Errors) } }) } @@ -457,7 +461,7 @@ func (app *App) logError(context context.Context, errors errcode.Errors) { "err.code", "err.message", "err.detail")) - ctxu.GetLogger(c).Errorf("An error occured") + ctxu.GetResponseLogger(c).Errorf("response completed with error") } } From 6a11f5a024c9446bb09958416c47ab7813448568 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 30 Jul 2015 13:34:53 -0700 Subject: [PATCH 238/501] Add image name tests around hostnames Signed-off-by: Stephen J Day --- docs/api/v2/names_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index 89ab9c61..656ae846 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -164,6 +164,23 @@ var ( err: ErrRepositoryNameComponentInvalid, invalid: true, }, + { + input: "b.gcr.io/test.example.com/my-app", // embedded domain component + }, + // TODO(stevvooe): The following is a punycode domain name that we may + // want to allow in the future. Currently, this is not allowed but we + // may want to change this in the future. Adding this here as invalid + // for the time being. + { + input: "xn--n3h.com/myimage", // http://☃.com in punycode + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "xn--7o8h.com/myimage", // http://🐳.com in punycode + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, } ) From d79179884af9e42ff239b034d5a015eaa4a579ac Mon Sep 17 00:00:00 2001 From: Li Yi Date: Mon, 11 May 2015 23:26:51 +0800 Subject: [PATCH 239/501] Support OSS driver Signed-off-by: Li Yi --- docs/storage/driver/oss/oss.go | 813 ++++++++++++++++++++++++++++ docs/storage/driver/oss/oss_test.go | 152 ++++++ 2 files changed, 965 insertions(+) create mode 100755 docs/storage/driver/oss/oss.go create mode 100755 docs/storage/driver/oss/oss_test.go diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go new file mode 100755 index 00000000..f85c7541 --- /dev/null +++ b/docs/storage/driver/oss/oss.go @@ -0,0 +1,813 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. +// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that OSS guarantees only eventual consistency, so do not assume +// that a successful write will mean immediate access to the data written (although +// in most regions a new object put has guaranteed read after write). The only true +// guarantee is that once you call Stat and receive a certain file size, that much of +// the file is already accessible. 
+package oss + +import ( + "bytes" + "fmt" + "github.com/docker/distribution/context" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyId string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. 
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskeyid"] + if !ok { + accessKey = "" + } + secretKey, ok := parameters["accesskeysecret"] + if !ok { + secretKey = "" + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + internalBool := false + internal, ok := parameters["internal"] + if ok { + internalBool, ok = internal.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + params := DriverParameters{ + AccessKeyId: fmt.Sprint(accessKey), + AccessKeySecret: fmt.Sprint(secretKey), + Bucket: fmt.Sprint(bucket), + Region: oss.Region(fmt.Sprint(regionName)), + ChunkSize: chunkSize, + RootDirectory: fmt.Sprint(rootDirectory), + Encrypt: encryptBool, + Secure: secureBool, + Internal: internalBool, + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyId, params.AccessKeySecret) + bucket := client.Bucket(params.Bucket) + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. + if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new OSS client while another one is running on the same bucket. 
+ // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + Client: client, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.ossPath(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) + if err != nil { + if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + partNumber := 1 + bytesRead := 0 + var putErrChan chan error + parts := []oss.Part{} + var part oss.Part + done := make(chan struct{}) // stopgap to free up waiting goroutines + + multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return 0, err + } + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. 
+ defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the OSS + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying OSS library should handle it, it doesn't seem to + // be part of the shouldRetry function (see denverdino/aliyungo/oss). + defer func() { + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } + }() + + if bytesRead <= 0 { + return + } + + var err error + var part oss.Part + + loop: + for retries := 0; retries < 5; retries++ { + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) + if err == nil { + break // success! + } + + // NOTE(stevvooe): This retry code tries to only retry under + // conditions where the OSS package does not. We may add oss + // error codes to the below if we see others bubble up in the + // application. Right now, the most troubling is + // RequestTimeout, which seems to only triggered when a tcp + // connection to OSS slows to a crawl. If the RequestTimeout + // ends up getting added to the OSS library and we don't see + // other errors, this retry loop can be removed. + switch err := err.(type) { + case *oss.Error: + switch err.Code { + case "RequestTimeout": + // allow retries on only this error. + default: + break loop + } + } + + backoff := 100 * time.Millisecond * time.Duration(retries+1) + logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) + time.Sleep(backoff) + } + + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. 
+ parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.ossPath(path), nil) + if err != nil { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Name+"/"+d.ossPath(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{}, + d.Bucket.Name+"/"+d.ossPath(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if 
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) + if err != nil { + return nil, err + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath)) + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(), + oss.CopyOptions{ + //Options: d.getOptions(), + //ContentType: d.getContentType() + }, + d.Bucket.Path(d.ossPath(sourcePath))) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+func (d *driver) Delete(ctx context.Context, path string) error { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + ossObjects := make([]oss.Object, listMax) + + for len(listResponse.Contents) > 0 { + for index, key := range listResponse.Contents { + ossObjects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) + if err != nil { + return nil + } + + listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + logrus.Infof("expiresTime: %d", expiresTime) + + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + logrus.Infof("expiresTime: %d", expiresTime) + testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("testURL: %s", testURL) + return testURL, nil +} + +func (d *driver) ossPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the OSS bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).ossPath(path) +} + +func parseError(path string, err error) error { + if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + ossErr, ok := err.(*oss.Error) + return ok && ossErr.Code == code +} + +func (d *driver) getOptions() oss.Options { + return oss.Options{ServerSideEncryption: d.Encrypt} +} + +func getPermissions() oss.ACL { + return oss.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// getbuf returns a buffer from the driver's pool with length d.ChunkSize. +func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} diff --git a/docs/storage/driver/oss/oss_test.go b/docs/storage/driver/oss/oss_test.go new file mode 100755 index 00000000..ecfe36e9 --- /dev/null +++ b/docs/storage/driver/oss/oss_test.go @@ -0,0 +1,152 @@ +package oss + +import ( + alioss "github.com/denverdino/aliyungo/oss" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "io/ioutil" + //"log" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type OSSDriverConstructor func(rootDirectory string) (*Driver, error) + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor := func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + alioss.Region(region), + internalBool, + encryptBool, + secureBool, + minChunkSize, + rootDirectory, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + } + + testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) + + // ossConstructor := func() (*Driver, error) { + // return ossDriverConstructor(aws.GetRegion(region)) + // } + + RegisterOSSDriverSuite(ossDriverConstructor, skipCheck) + + // testsuites.RegisterIPCSuite(driverName, map[string]string{ + // "accesskey": accessKey, + // "secretkey": secretKey, + // "region": region.Name, + // "bucket": bucket, + // "encrypt": encrypt, + // }, skipCheck) + // } +} + +func RegisterOSSDriverSuite(ossDriverConstructor OSSDriverConstructor, skipCheck testsuites.SkipCheck) { + check.Suite(&OSSDriverSuite{ + Constructor: ossDriverConstructor, + SkipCheck: skipCheck, + }) +} + +type OSSDriverSuite struct { + Constructor OSSDriverConstructor + testsuites.SkipCheck +} + +func (suite *OSSDriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } +} + +func (suite *OSSDriverSuite) TestEmptyRootList(c *check.C) { + validRoot, err := ioutil.TempDir("", "driver-") + c.Assert(err, check.IsNil) + defer os.Remove(validRoot) + + rootedDriver, err := suite.Constructor(validRoot) + c.Assert(err, check.IsNil) + emptyRootDriver, err := suite.Constructor("") + c.Assert(err, check.IsNil) + slashRootDriver, err := suite.Constructor("/") + c.Assert(err, check.IsNil) + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + c.Assert(err, check.IsNil) + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } +} From 
bffce5722e7ea1563c0bac4809cf7a1f4a994795 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 12 May 2015 00:06:14 +0800 Subject: [PATCH 240/501] Fix the warning of golint Signed-off-by: Li Yi --- docs/storage/driver/oss/oss.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index f85c7541..598bc55c 100755 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -47,7 +47,7 @@ const listMax = 1000 //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { - AccessKeyId string + AccessKeyID string AccessKeySecret string Bucket string Region oss.Region @@ -176,7 +176,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { } params := DriverParameters{ - AccessKeyId: fmt.Sprint(accessKey), + AccessKeyID: fmt.Sprint(accessKey), AccessKeySecret: fmt.Sprint(secretKey), Bucket: fmt.Sprint(bucket), Region: oss.Region(fmt.Sprint(regionName)), @@ -194,7 +194,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // bucketName func New(params DriverParameters) (*Driver, error) { - client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyId, params.AccessKeySecret) + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret) bucket := client.Bucket(params.Bucket) // Validate that the given credentials have at least read permissions in the From 46148721e188955ba5196f5895ebbc3274690cb4 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 12 May 2015 11:56:00 +0800 Subject: [PATCH 241/501] Add the secure access with HTTPS Signed-off-by: Li Yi --- docs/storage/driver/oss/oss.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 598bc55c..9c52d577 100755 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -125,7 +125,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { if ok { internalBool, ok = internal.(bool) if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") + return nil, fmt.Errorf("The internal parameter should be a boolean") } } @@ -194,7 +194,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // bucketName func New(params DriverParameters) (*Driver, error) { - client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret) + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) bucket := client.Bucket(params.Bucket) // Validate that the given credentials have at least read permissions in the From d28a3fa28a4a97e3d1708b7e3a452fd500aad762 Mon Sep 17 00:00:00 2001 From: tgic Date: Mon, 15 Jun 2015 20:03:32 +0800 Subject: [PATCH 242/501] add endpoint support --- docs/storage/driver/oss/oss.go | 8 ++++++++ docs/storage/driver/oss/oss_test.go | 20 +++++++++++--------- 2 files changed, 19 insertions(+), 9 deletions(-) mode change 100755 => 100644 docs/storage/driver/oss/oss.go mode change 100755 => 100644 docs/storage/driver/oss/oss_test.go diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go old mode 100755 new mode 100644 index 9c52d577..91ab4b1a --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -56,6 +56,7 @@ type DriverParameters struct { Secure bool ChunkSize int64 RootDirectory 
string + Endpoint string } func init() { @@ -175,6 +176,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { rootDirectory = "" } + endpoint, ok := parameters["endpoint"] + if !ok { + endpoint = "" + } + params := DriverParameters{ AccessKeyID: fmt.Sprint(accessKey), AccessKeySecret: fmt.Sprint(secretKey), @@ -185,6 +191,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { Encrypt: encryptBool, Secure: secureBool, Internal: internalBool, + Endpoint: fmt.Sprint(endpoint), } return New(params) @@ -195,6 +202,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { func New(params DriverParameters) (*Driver, error) { client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) + client.SetEndpoint(params.Endpoint) bucket := client.Bucket(params.Bucket) // Validate that the given credentials have at least read permissions in the diff --git a/docs/storage/driver/oss/oss_test.go b/docs/storage/driver/oss/oss_test.go old mode 100755 new mode 100644 index ecfe36e9..2b469f34 --- a/docs/storage/driver/oss/oss_test.go +++ b/docs/storage/driver/oss/oss_test.go @@ -27,6 +27,7 @@ func init() { internal := os.Getenv("OSS_INTERNAL") encrypt := os.Getenv("OSS_ENCRYPT") secure := os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) @@ -59,15 +60,16 @@ func init() { } parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - alioss.Region(region), - internalBool, - encryptBool, - secureBool, - minChunkSize, - rootDirectory, + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, } return New(parameters) From fc20dd72d6926650e7c4a6a52929b372198fb4d3 Mon Sep 17 00:00:00 2001 From: tgic Date: Tue, 16 Jun 2015 14:06:04 +0800 Subject: [PATCH 243/501] check access key and secret before run --- docs/storage/driver/oss/oss.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 91ab4b1a..21a7e32a 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -104,11 +104,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // be summoned when GetAuth is called) accessKey, ok := parameters["accesskeyid"] if !ok { - accessKey = "" + return nil, fmt.Errorf("No accesskeyid parameter provided") } secretKey, ok := parameters["accesskeysecret"] if !ok { - secretKey = "" + return nil, fmt.Errorf("No accesskeysecret parameter provided") } regionName, ok := parameters["region"] From 440664a109f85b178c23405e5067b69455ad62b4 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Thu, 2 Jul 2015 22:42:12 +0800 Subject: [PATCH 244/501] Update the OSS test case for latest code change Signed-off-by: Li Yi --- docs/storage/driver/oss/oss_test.go | 89 ++++++++++++----------------- 1 file changed, 38 insertions(+), 51 deletions(-) diff --git a/docs/storage/driver/oss/oss_test.go b/docs/storage/driver/oss/oss_test.go index 2b469f34..2749a3d0 100644 --- a/docs/storage/driver/oss/oss_test.go +++ b/docs/storage/driver/oss/oss_test.go @@ -17,7 +17,9 @@ import ( // Hook up gocheck into the "go test" runner. 
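[Editor's note: the test init above repeats the same three-line dance for each optional boolean parameter (OSS_INTERNAL, OSS_ENCRYPT, OSS_SECURE): an unset variable means false, and anything else must parse with strconv.ParseBool. Factored out, the pattern looks like the sketch below; the helper name is hypothetical.]

import (
	"os"
	"strconv"
)

// envBool reads an optional boolean environment variable, defaulting to
// false when unset and surfacing parse errors for junk values.
func envBool(name string) (bool, error) {
	v := os.Getenv(name)
	if v == "" {
		return false, nil
	}
	return strconv.ParseBool(v)
}

[Each of the three blocks in init then reduces to a call such as internalBool, err := envBool("OSS_INTERNAL").]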
func Test(t *testing.T) { check.TestingT(t) } -type OSSDriverConstructor func(rootDirectory string) (*Driver, error) +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string func init() { accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") @@ -34,7 +36,7 @@ func init() { } defer os.Remove(root) - ossDriverConstructor := func(rootDirectory string) (*Driver, error) { + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) @@ -76,79 +78,64 @@ func init() { } // Skip OSS storage driver tests if environment variable parameters are not provided - skipCheck := func() string { + skipCheck = func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" } return "" } - driverConstructor := func() (storagedriver.StorageDriver, error) { + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) } - testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) - - // ossConstructor := func() (*Driver, error) { - // return ossDriverConstructor(aws.GetRegion(region)) - // } - - RegisterOSSDriverSuite(ossDriverConstructor, skipCheck) - - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // "accesskey": accessKey, - // "secretkey": secretKey, - // "region": region.Name, - // "bucket": bucket, - // "encrypt": encrypt, - // }, skipCheck) - // } -} - -func RegisterOSSDriverSuite(ossDriverConstructor OSSDriverConstructor, skipCheck testsuites.SkipCheck) { - check.Suite(&OSSDriverSuite{ - Constructor: ossDriverConstructor, - SkipCheck: skipCheck, - }) -} - -type OSSDriverSuite struct { - Constructor OSSDriverConstructor - testsuites.SkipCheck -} - -func (suite *OSSDriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } -} - -func (suite *OSSDriverSuite) TestEmptyRootList(c *check.C) { validRoot, err := ioutil.TempDir("", "driver-") - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } defer os.Remove(validRoot) - rootedDriver, err := suite.Constructor(validRoot) - c.Assert(err, check.IsNil) - emptyRootDriver, err := suite.Constructor("") - c.Assert(err, check.IsNil) - slashRootDriver, err := suite.Constructor("/") - c.Assert(err, check.IsNil) + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } filename := "/test" contents := []byte("contents") ctx := context.Background() err = rootedDriver.PutContent(ctx, filename, contents) - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } defer rootedDriver.Delete(ctx, filename) keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if 
!storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } } From a9c3f86ce06c4a7390192296ea12a6fad20937e3 Mon Sep 17 00:00:00 2001 From: tgic Date: Sat, 4 Jul 2015 23:53:00 +0800 Subject: [PATCH 245/501] fix oss: got 403 in TestContinueStreamAppendLarge Signed-off-by: tgic --- docs/storage/driver/oss/oss.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 21a7e32a..b3ab11c9 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -492,7 +492,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // currentLength >= offset >= chunkSize _, part, err = multi.PutPartCopy(partNumber, oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.ossPath(path)) + d.Bucket.Path(d.ossPath(path))) if err != nil { return 0, err } @@ -586,7 +586,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // offset > currentLength >= chunkSize _, part, err = multi.PutPartCopy(partNumber, oss.CopyOptions{}, - d.Bucket.Name+"/"+d.ossPath(path)) + d.Bucket.Path(d.ossPath(path))) if err != nil { return 0, err } From faee4224209eb9b41cf3ba242a1e18ce434eba80 Mon Sep 17 00:00:00 2001 From: tgic Date: Sun, 5 Jul 2015 01:14:24 +0800 Subject: [PATCH 246/501] fix testcase TestReadStreamWithOffset incompatible with oss Signed-off-by: tgic --- docs/storage/driver/oss/oss.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index b3ab11c9..d12f6ed2 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -276,12 +276,18 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) if err != nil { - if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - return nil, parseError(path, err) } + + // Due to Aliyun OSS API, status 200 and whole object will be return instead of an + // InvalidRange error when range is invalid. + // + // OSS sever will always return http.StatusPartialContent if range is acceptable. 
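[Editor's note: the comment above records the OSS quirk this patch works around: an unsatisfiable Range request is answered with 200 and the whole object rather than an InvalidRange error, so only a 206 proves the server honored the offset. The general shape of the check as a standalone sketch; objectURL is a placeholder.]

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// openAt requests the object from offset onward and returns an empty
// reader when the server ignored the range (offset past end of object).
func openAt(objectURL string, offset int64) (io.ReadCloser, error) {
	req, err := http.NewRequest("GET", objectURL, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// Anything other than 206 Partial Content means the range was not
	// applied; drop the body rather than serving data from offset zero.
	if resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return ioutil.NopCloser(bytes.NewReader(nil)), nil
	}
	return resp.Body, nil
}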
+ if resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return resp.Body, nil } From 5d34d31739e812153d530bfc65eec63e9834660c Mon Sep 17 00:00:00 2001 From: Li Yi Date: Sun, 26 Jul 2015 07:35:20 +0800 Subject: [PATCH 247/501] Update the comments for consistence model Change-Id: I161522ee51f247fb17e42844b3699bd9031e34e8 Signed-off-by: Li Yi --- docs/storage/driver/oss/oss.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index d12f6ed2..4e514f37 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -8,8 +8,7 @@ // time for directories (directories are an abstraction for key, value stores) // // Keep in mind that OSS guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true +// that a successful write will mean immediate access to the data written. The only true // guarantee is that once you call Stat and receive a certain file size, that much of // the file is already accessible. package oss From 9c27080c7a18bb8afcec3d88bb6873799701bcea Mon Sep 17 00:00:00 2001 From: Li Yi Date: Sun, 26 Jul 2015 10:01:45 +0800 Subject: [PATCH 248/501] Update the comment for the consistency model Change-Id: Iee49afeda1c11d6af8c0f26c96d8ccc328c22757 Signed-off-by: Li Yi --- docs/storage/driver/oss/oss.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 4e514f37..2303ebd0 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -7,10 +7,6 @@ // Because OSS is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // -// Keep in mind that OSS guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written. The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. package oss import ( From 235ccc05904427fe0f82246d5d7289b2a0d97b49 Mon Sep 17 00:00:00 2001 From: tgic Date: Tue, 28 Jul 2015 12:45:05 +0800 Subject: [PATCH 249/501] add include_oss build tag Signed-off-by: tgic --- docs/storage/driver/oss/doc.go | 3 +++ docs/storage/driver/oss/oss.go | 2 ++ docs/storage/driver/oss/oss_test.go | 2 ++ 3 files changed, 7 insertions(+) create mode 100644 docs/storage/driver/oss/doc.go diff --git a/docs/storage/driver/oss/doc.go b/docs/storage/driver/oss/doc.go new file mode 100644 index 00000000..d1bc932f --- /dev/null +++ b/docs/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. 
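[Editor's note: the new doc.go spells out the opt-in mechanism: every file in the package carries a build constraint, so the driver is compiled only when the tag is supplied. In miniature, a gated file begins with the constraint line, separated from the package clause by a blank line:]

// +build include_oss

package oss

[and the registry binary is then built with the tag enabled, e.g. go build -tags include_oss (the distribution binary lives under cmd/registry). Without the tag, the driver and its aliyungo dependency are excluded from the build entirely.]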
+package oss diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 2303ebd0..cbda6d16 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -7,6 +7,8 @@ // Because OSS is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // +// +build include_oss + package oss import ( diff --git a/docs/storage/driver/oss/oss_test.go b/docs/storage/driver/oss/oss_test.go index 2749a3d0..56ec3208 100644 --- a/docs/storage/driver/oss/oss_test.go +++ b/docs/storage/driver/oss/oss_test.go @@ -1,3 +1,5 @@ +// +build include_oss + package oss import ( From 90595c7ed9aef1a9c4a16d97083fa70db23bb784 Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 31 Jul 2015 12:39:55 +0800 Subject: [PATCH 250/501] fix goimports Signed-off-by: tgic --- docs/storage/driver/oss/oss.go | 3 ++- docs/storage/driver/oss/oss_test.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index cbda6d16..108ad475 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -14,7 +14,6 @@ package oss import ( "bytes" "fmt" - "github.com/docker/distribution/context" "io" "io/ioutil" "net/http" @@ -24,6 +23,8 @@ import ( "sync" "time" + "github.com/docker/distribution/context" + "github.com/Sirupsen/logrus" "github.com/denverdino/aliyungo/oss" storagedriver "github.com/docker/distribution/registry/storage/driver" diff --git a/docs/storage/driver/oss/oss_test.go b/docs/storage/driver/oss/oss_test.go index 56ec3208..fbae5d9c 100644 --- a/docs/storage/driver/oss/oss_test.go +++ b/docs/storage/driver/oss/oss_test.go @@ -3,11 +3,12 @@ package oss import ( + "io/ioutil" + alioss "github.com/denverdino/aliyungo/oss" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - "io/ioutil" //"log" "os" "strconv" From f01a0694c14b4ce835ebea13a978e5a125fd0d0f Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 31 Jul 2015 12:46:54 +0800 Subject: [PATCH 251/501] remove unused code and fix todo format Signed-off-by: tgic --- docs/storage/driver/oss/oss.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 108ad475..cec32026 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -209,20 +209,8 @@ func New(params DriverParameters) (*Driver, error) { return nil, err } - // TODO Currently multipart uploads have no timestamps, so this would be unwise + // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new OSS client while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? 
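[Editor's note: the commented-out block being removed here sketched a startup sweep that would abort every outstanding multipart upload; it was left disabled because, as the TODO notes, multipart uploads carry no timestamps, so the sweep could kill an upload that another live registry instance still owns. For reference, the sweep cleaned up from the deleted lines, using the same ListMulti/Abort calls:]

func abortAllMultipartUploads(bucket *oss.Bucket) error {
	multis, _, err := bucket.ListMulti("", "")
	if err != nil {
		return err
	}
	for _, multi := range multis {
		// Dangerous against a shared bucket: this also aborts uploads
		// that other registry instances still have in flight.
		if err := multi.Abort(); err != nil {
			return err
		}
	}
	return nil
}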
- // if err != nil { - // return nil, err - // } - // } d := &driver{ Client: client, From 83c8617cb1ff258344c8cecb86fbd48521f8d29d Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Fri, 31 Jul 2015 16:43:01 -0700 Subject: [PATCH 252/501] Spelling corrections Signed-off-by: Richard Scothern --- docs/api/v2/descriptors.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 74bdb9f2..67b5c129 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -536,7 +536,7 @@ var routeDescriptors = []RouteDescriptor{ }, Successes: []ResponseDescriptor{ { - Description: "The manifest idenfied by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ digestHeader, @@ -928,7 +928,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUpload, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", - Entity: "Intiate Blob Upload", + Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ { From 8fbc1de08140fce66690fc7b498a9028a8458966 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 27 Jul 2015 10:00:00 -0700 Subject: [PATCH 253/501] Use CloseNotifier to supress spurious HTTP 400 errors on early disconnect When a client disconnects without completing a HTTP request, we were attempting to process the partial request, which usually leads to a 400 error. These errors can pollute the logs and make it more difficult to track down real bugs. This change uses CloseNotifier to detect disconnects. In combination with checking Content-Length, we can detect a disconnect before sending the full payload, and avoid logging a 400 error. This logic is only applied to PUT, POST, and PATCH endpoints, as these are the places where disconnects during a request are most likely to happen. Signed-off-by: Aaron Lehmann --- docs/handlers/blobupload.go | 44 +++++++++++++++++++++++++++++++++++-- docs/handlers/images.go | 30 +++++++++++++++++++++++-- 2 files changed, 70 insertions(+), 4 deletions(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 8dc417ba..84bf26c5 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -170,8 +170,28 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // TODO(dmcgowan): support Content-Range header to seek and write range + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + panic("the ResponseWriter does not implement CloseNotifier") + } + // Copy the data - if _, err := io.Copy(buh.Upload, r.Body); err != nil { + copied, err := io.Copy(buh.Upload, r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't recieve as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. 
+ select { + case <-clientClosed: + ctxu.GetLogger(buh).Error("client disconnected during blob PATCH") + return + default: + } + } + if err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return @@ -211,8 +231,28 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + panic("the ResponseWriter does not implement CloseNotifier") + } + // Read in the data, if any. - if _, err := io.Copy(buh.Upload, r.Body); err != nil { + copied, err := io.Copy(buh.Upload, r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't recieve as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + ctxu.GetLogger(buh).Error("client disconnected during blob PUT") + return + default: + } + } + if err != nil { ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 01f9b7a2..42b2ea48 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -3,6 +3,7 @@ package handlers import ( "encoding/json" "fmt" + "io/ioutil" "net/http" "strings" @@ -112,10 +113,35 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - dec := json.NewDecoder(r.Body) + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + panic("the ResponseWriter does not implement CloseNotifier") + } + + // Copy the data + jsonBytes, err := ioutil.ReadAll(r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && int64(len(jsonBytes)) < r.ContentLength)) { + // Didn't recieve as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. 
+ select { + case <-clientClosed: + ctxu.GetLogger(imh).Error("client disconnected during image manifest PUT") + return + default: + } + } + if err != nil { + ctxu.GetLogger(imh).Errorf("unknown error reading payload: %v", err) + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } var manifest manifest.SignedManifest - if err := dec.Decode(&manifest); err != nil { + if err := json.Unmarshal(jsonBytes, &manifest); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return } From b0d133045d3bbdb45bafe8c1fc37a8d6682036b5 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 29 Jul 2015 18:18:50 -0700 Subject: [PATCH 254/501] Factor CloseNotifier use into a new function Signed-off-by: Aaron Lehmann --- docs/handlers/blobupload.go | 53 +++---------------------------------- docs/handlers/helpers.go | 39 +++++++++++++++++++++++++++ docs/handlers/images.go | 31 ++++------------------ 3 files changed, 48 insertions(+), 75 deletions(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 84bf26c5..1d1c1009 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -2,7 +2,6 @@ package handlers import ( "fmt" - "io" "net/http" "net/url" "os" @@ -170,30 +169,8 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // TODO(dmcgowan): support Content-Range header to seek and write range - // Get a channel that tells us if the client disconnects - var clientClosed <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - clientClosed = notifier.CloseNotify() - } else { - panic("the ResponseWriter does not implement CloseNotifier") - } - - // Copy the data - copied, err := io.Copy(buh.Upload, r.Body) - if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { - // Didn't recieve as much content as expected. Did the client - // disconnect during the request? If so, avoid returning a 400 - // error to keep the logs cleaner. - select { - case <-clientClosed: - ctxu.GetLogger(buh).Error("client disconnected during blob PATCH") - return - default: - } - } - if err != nil { - ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary return } @@ -231,30 +208,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - // Get a channel that tells us if the client disconnects - var clientClosed <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - clientClosed = notifier.CloseNotify() - } else { - panic("the ResponseWriter does not implement CloseNotifier") - } - - // Read in the data, if any. - copied, err := io.Copy(buh.Upload, r.Body) - if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { - // Didn't recieve as much content as expected. Did the client - // disconnect during the request? If so, avoid returning a 400 - // error to keep the logs cleaner. 
- select { - case <-clientClosed: - ctxu.GetLogger(buh).Error("client disconnected during blob PUT") - return - default: - } - } - if err != nil { - ctxu.GetLogger(buh).Errorf("unknown error copying into upload: %v", err) - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { + // copyFullPayload reports the error if necessary return } diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index e2d220c2..abbcb1be 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -1,8 +1,12 @@ package handlers import ( + "errors" "io" "net/http" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" ) // closeResources closes all the provided resources after running the target @@ -15,3 +19,38 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { handler.ServeHTTP(w, r) }) } + +// copyFullPayload copies the payload of a HTTP request to destWriter. If it +// receives less content than expected, and the client disconnected during the +// upload, it avoids sending a 400 error to keep the logs cleaner. +func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { + // Get a channel that tells us if the client disconnects + var clientClosed <-chan bool + if notifier, ok := responseWriter.(http.CloseNotifier); ok { + clientClosed = notifier.CloseNotify() + } else { + panic("the ResponseWriter does not implement CloseNotifier") + } + + // Read in the data, if any. + copied, err := io.Copy(destWriter, r.Body) + if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { + // Didn't recieve as much content as expected. Did the client + // disconnect during the request? If so, avoid returning a 400 + // error to keep the logs cleaner. + select { + case <-clientClosed: + ctxu.GetLogger(context).Error("client disconnected during " + action) + return errors.New("client disconnected") + default: + } + } + + if err != nil { + ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) + *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) + return err + } + + return nil +} diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 42b2ea48..dbe7b706 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -1,9 +1,9 @@ package handlers import ( + "bytes" "encoding/json" "fmt" - "io/ioutil" "net/http" "strings" @@ -113,35 +113,14 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - // Get a channel that tells us if the client disconnects - var clientClosed <-chan bool - if notifier, ok := w.(http.CloseNotifier); ok { - clientClosed = notifier.CloseNotify() - } else { - panic("the ResponseWriter does not implement CloseNotifier") - } - - // Copy the data - jsonBytes, err := ioutil.ReadAll(r.Body) - if clientClosed != nil && (err != nil || (r.ContentLength > 0 && int64(len(jsonBytes)) < r.ContentLength)) { - // Didn't recieve as much content as expected. Did the client - // disconnect during the request? If so, avoid returning a 400 - // error to keep the logs cleaner. 
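[Editor's note: copyFullPayload, added in helpers.go above, centralizes the disconnect heuristic: copy the body, and if fewer bytes arrived than Content-Length promised, consult CloseNotify with a non-blocking select before concluding the request was malformed. The idiom in a self-contained handler, using only the standard library; the handler itself is hypothetical.]

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func putHandler(w http.ResponseWriter, r *http.Request) {
	var clientClosed <-chan bool
	if cn, ok := w.(http.CloseNotifier); ok {
		clientClosed = cn.CloseNotify()
	}

	copied, err := io.Copy(ioutil.Discard, r.Body)
	if err != nil || (r.ContentLength > 0 && copied < r.ContentLength) {
		// A receive from a nil channel is never ready, so the default
		// case fires when CloseNotifier was unavailable.
		select {
		case <-clientClosed:
			log.Print("client disconnected during PUT")
			return // suppress the 400; the client is gone anyway
		default:
		}
		http.Error(w, "incomplete payload", http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusCreated)
}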
- select { - case <-clientClosed: - ctxu.GetLogger(imh).Error("client disconnected during image manifest PUT") - return - default: - } - } - if err != nil { - ctxu.GetLogger(imh).Errorf("unknown error reading payload: %v", err) - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + var jsonBuf bytes.Buffer + if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { + // copyFullPayload reports the error if necessary return } var manifest manifest.SignedManifest - if err := json.Unmarshal(jsonBytes, &manifest); err != nil { + if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return } From b51913f6198ccacdc408c0d2af583f15f2889820 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Fri, 31 Jul 2015 17:39:30 -0700 Subject: [PATCH 255/501] Set the response code to 499 when a client disconnects during an upload The response code isn't actually sent to the client, because the connection has already closed by this point. But it causes the status code to appear as 499 in the logs instead of 0. Signed-off-by: Aaron Lehmann --- docs/handlers/helpers.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index abbcb1be..1f9a8ee1 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -40,6 +40,12 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr // error to keep the logs cleaner. select { case <-clientClosed: + // Set the response code to "499 Client Closed Request" + // Even though the connection has already been closed, + // this causes the logger to pick up a 499 error + // instead of showing 0 for the HTTP status. + responseWriter.WriteHeader(499) + ctxu.GetLogger(context).Error("client disconnected during " + action) return errors.New("client disconnected") default: From 405633610000f5665ebb312661454b997285232e Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 3 Aug 2015 11:59:19 -0700 Subject: [PATCH 256/501] Add blob delete entry to api description and regenerate api.md Signed-off-by: Richard Scothern --- docs/api/v2/descriptors.go | 69 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 3 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 67b5c129..0ef64f88 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -519,7 +519,7 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameManifest, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", - Description: "Create, update and retrieve manifests.", + Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ { Method: "GET", @@ -768,9 +768,8 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameBlob, Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", - Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest.", + Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ - { Method: "GET", Description: "Retrieve the blob from the registry identified by `digest`. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", @@ -919,6 +918,70 @@ var routeDescriptors = []RouteDescriptor{ }, }, }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Delete is not enabled on the registry", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeUnsupported, + }, + }, + }, + }, + }, + }, + // TODO(stevvooe): We may want to add a PUT request here to // kickoff an upload of a blob, integrated with the blob upload // API. From 54f0c70d88a95ff2aa37e7fdb01c691c1c9c97e2 Mon Sep 17 00:00:00 2001 From: Doug Davis Date: Fri, 31 Jul 2015 14:51:48 -0700 Subject: [PATCH 257/501] Fix vet issue registry/storage/blob_test.go:149: arg d for printf verb %s of wrong type: github.com/docker/distribution.Descriptor Signed-off-by: Doug Davis --- docs/storage/blob_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 7719bab1..8f6fb6f2 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -146,7 +146,7 @@ func TestSimpleBlobUpload(t *testing.T) { d, err := bs.Stat(ctx, desc.Digest) if err == nil { - t.Fatalf("unexpected non-error stating deleted blob: %s", d) + t.Fatalf("unexpected non-error stating deleted blob: %v", d) } switch err { From d1cb12fa3dda1a268a41dc5a613e1640aba7300d Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 29 Jul 2015 11:12:01 -0700 Subject: [PATCH 258/501] Add pull through cache functionality to the Registry which can be configured with a new `proxy` section in the configuration file. Create a new registry type which delegates storage to a proxyBlobStore and proxyManifestStore. These stores will pull through data if not present locally. proxyBlobStore takes care not to write duplicate data to disk. Add a scheduler to cleanup expired content. The scheduler runs as a background goroutine. When a blob or manifest is pulled through from the remote registry, an entry is added to the scheduler with a TTL. When the TTL expires the scheduler calls a pre-specified function to remove the fetched resource. Add token authentication to the registry middleware. Get a token at startup and preload the credential store with the username and password supplied in the config file. 
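[Editor's note: the scheduler described here lands in docs/proxy/scheduler/scheduler.go, listed in the diffstat that follows. The mechanism reduces to entries with deadlines plus a background sweep that invokes a caller-supplied expiry function. An illustrative reduction, not the committed code:]

import (
	"sync"
	"time"
)

type expiryEntry struct {
	key    string
	expiry time.Time
}

type ttlScheduler struct {
	mu       sync.Mutex
	entries  map[string]expiryEntry
	onExpire func(key string)
}

func newTTLScheduler(onExpire func(string)) *ttlScheduler {
	s := &ttlScheduler{entries: map[string]expiryEntry{}, onExpire: onExpire}
	go s.sweep()
	return s
}

// Add records (or refreshes) the deadline for a key, e.g. a blob digest.
func (s *ttlScheduler) Add(key string, ttl time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.entries[key] = expiryEntry{key: key, expiry: time.Now().Add(ttl)}
}

func (s *ttlScheduler) sweep() {
	for range time.Tick(time.Second) {
		s.mu.Lock()
		for k, e := range s.entries {
			if time.Now().After(e.expiry) {
				delete(s.entries, k)
				go s.onExpire(k) // e.g. remove the cached blob or manifest
			}
		}
		s.mu.Unlock()
	}
}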
Allow resumable digest functionality to be disabled at runtime and disable it when the registry is a pull through cache. Signed-off-by: Richard Scothern --- docs/client/blob_writer.go | 4 + docs/client/repository.go | 3 +- docs/client/repository_test.go | 2 +- docs/handlers/app.go | 33 ++- docs/handlers/app_test.go | 2 +- docs/middleware/registry/middleware.go | 7 +- docs/middleware/repository/middleware.go | 7 +- docs/proxy/proxyauth.go | 54 +++++ docs/proxy/proxyblobstore.go | 214 +++++++++++++++++++ docs/proxy/proxyblobstore_test.go | 231 +++++++++++++++++++++ docs/proxy/proxymanifeststore.go | 155 ++++++++++++++ docs/proxy/proxymanifeststore_test.go | 235 +++++++++++++++++++++ docs/proxy/proxymetrics.go | 74 +++++++ docs/proxy/proxyregistry.go | 139 +++++++++++++ docs/proxy/scheduler/scheduler.go | 250 +++++++++++++++++++++++ docs/proxy/scheduler/scheduler_test.go | 165 +++++++++++++++ docs/storage/blob_test.go | 8 +- docs/storage/blobwriter.go | 28 +++ docs/storage/blobwriter_resumable.go | 8 + docs/storage/catalog_test.go | 2 +- docs/storage/linkedblobstore.go | 22 +- docs/storage/manifeststore_test.go | 4 +- docs/storage/registry.go | 6 +- docs/storage/vacuum.go | 67 ++++++ 24 files changed, 1682 insertions(+), 38 deletions(-) create mode 100644 docs/proxy/proxyauth.go create mode 100644 docs/proxy/proxyblobstore.go create mode 100644 docs/proxy/proxyblobstore_test.go create mode 100644 docs/proxy/proxymanifeststore.go create mode 100644 docs/proxy/proxymanifeststore_test.go create mode 100644 docs/proxy/proxymetrics.go create mode 100644 docs/proxy/proxyregistry.go create mode 100644 docs/proxy/scheduler/scheduler.go create mode 100644 docs/proxy/scheduler/scheduler_test.go create mode 100644 docs/storage/vacuum.go diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 5f6f01f7..c7eee4e8 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -25,6 +25,10 @@ type httpBlobUpload struct { closed bool } +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown diff --git a/docs/client/repository.go b/docs/client/repository.go index d0079f09..c1e8e07f 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -280,14 +280,13 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic } if _, ok := ms.etags[tag]; ok { - req.Header.Set("eTag", ms.etags[tag]) + req.Header.Set("If-None-Match", ms.etags[tag]) } resp, err := ms.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { return nil, nil } else if SuccessStatus(resp.StatusCode) { diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 7219fff1..26201763 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -463,7 +463,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Method: "GET", Route: "/v2/" + repo + "/manifests/" + reference, Headers: http.Header(map[string][]string{ - "Etag": {fmt.Sprintf(`"%s"`, dgst)}, + "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, }), } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 1fcf13fc..f60290d0 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -20,6 +20,7 @@ import ( "github.com/docker/distribution/registry/auth" registrymiddleware 
"github.com/docker/distribution/registry/middleware/registry" repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" + "github.com/docker/distribution/registry/proxy" "github.com/docker/distribution/registry/storage" memorycache "github.com/docker/distribution/registry/storage/cache/memory" rediscache "github.com/docker/distribution/registry/storage/cache/redis" @@ -55,6 +56,9 @@ type App struct { } redis *redis.Pool + + // true if this registry is configured as a pull through cache + isCache bool } // NewApp takes a configuration and returns a configured app, ready to serve @@ -65,6 +69,7 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App Config: configuration, Context: ctx, router: v2.RouterWithPrefix(configuration.HTTP.Prefix), + isCache: configuration.Proxy.RemoteURL != "", } app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) @@ -152,10 +157,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled) + app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled, app.isCache) ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled) + app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled, app.isCache) ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { @@ -166,10 +171,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.registry == nil { // configure the registry if no cache section is available. 
- app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled) + app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled, app.isCache) } - app.registry, err = applyRegistryMiddleware(app.registry, configuration.Middleware["registry"]) + app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) if err != nil { panic(err) } @@ -185,6 +190,16 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App ctxu.GetLogger(app).Debugf("configured %q access controller", authType) } + // configure as a pull through cache + if configuration.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) + if err != nil { + panic(err.Error()) + } + app.isCache = true + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) + } + return app } @@ -447,7 +462,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { repository, app.eventBridge(context, r)) - context.Repository, err = applyRepoMiddleware(context.Repository, app.Config.Middleware["repository"]) + context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) @@ -668,9 +683,9 @@ func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []a } // applyRegistryMiddleware wraps a registry instance with the configured middlewares -func applyRegistryMiddleware(registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { +func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { for _, mw := range middlewares { - rmw, err := registrymiddleware.Get(mw.Name, mw.Options, registry) + rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) if err != nil { return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) } @@ -681,9 +696,9 @@ func applyRegistryMiddleware(registry distribution.Namespace, middlewares []conf } // applyRepoMiddleware wraps a repository with the configured middlewares -func applyRepoMiddleware(repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { +func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { for _, mw := range middlewares { - rmw, err := repositorymiddleware.Get(mw.Name, mw.Options, repository) + rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) if err != nil { return nil, err } diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 84d842e3..6f597527 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -31,7 +31,7 @@ func TestAppDispatcher(t *testing.T) { Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true), + registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true, false), } server := 
httptest.NewServer(app) router := v2.Router() diff --git a/docs/middleware/registry/middleware.go b/docs/middleware/registry/middleware.go index 048603b8..7535c6db 100644 --- a/docs/middleware/registry/middleware.go +++ b/docs/middleware/registry/middleware.go @@ -4,11 +4,12 @@ import ( "fmt" "github.com/docker/distribution" + "github.com/docker/distribution/context" ) // InitFunc is the type of a RegistryMiddleware factory function and is // used to register the constructor for different RegistryMiddleware backends. -type InitFunc func(registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) +type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) var middlewares map[string]InitFunc @@ -28,10 +29,10 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a RegistryMiddleware with the given options using the named backend. -func Get(name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { +func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { - return initFunc(registry, options) + return initFunc(ctx, registry, options) } } diff --git a/docs/middleware/repository/middleware.go b/docs/middleware/repository/middleware.go index d6330fc4..27b42aec 100644 --- a/docs/middleware/repository/middleware.go +++ b/docs/middleware/repository/middleware.go @@ -4,11 +4,12 @@ import ( "fmt" "github.com/docker/distribution" + "github.com/docker/distribution/context" ) // InitFunc is the type of a RepositoryMiddleware factory function and is // used to register the constructor for different RepositoryMiddleware backends. -type InitFunc func(repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) +type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) var middlewares map[string]InitFunc @@ -28,10 +29,10 @@ func Register(name string, initFunc InitFunc) error { } // Get constructs a RepositoryMiddleware with the given options using the named backend. 
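[Editor's note: both middleware packages receive the same mechanical change: InitFunc grows a leading context.Context, and Get forwards the caller's context into the registered constructor so middleware can log and trace against the request. The shape of the pattern reduced to essentials; the interface{} return and map names are illustrative.]

import (
	"fmt"

	"github.com/docker/distribution/context"
)

type InitFunc func(ctx context.Context, options map[string]interface{}) (interface{}, error)

var factories = map[string]InitFunc{}

// Register associates a constructor with a name.
func Register(name string, initFunc InitFunc) {
	factories[name] = initFunc
}

// Get invokes the named constructor with the caller's context.
func Get(ctx context.Context, name string, options map[string]interface{}) (interface{}, error) {
	if initFunc, exists := factories[name]; exists {
		return initFunc(ctx, options)
	}
	return nil, fmt.Errorf("no factory registered with name: %s", name)
}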
-func Get(name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { +func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { - return initFunc(repository, options) + return initFunc(ctx, repository, options) } } diff --git a/docs/proxy/proxyauth.go b/docs/proxy/proxyauth.go new file mode 100644 index 00000000..e4bec75a --- /dev/null +++ b/docs/proxy/proxyauth.go @@ -0,0 +1,54 @@ +package proxy + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution/registry/client/auth" +) + +const tokenURL = "https://auth.docker.io/token" + +type userpass struct { + username string + password string +} + +type credentials struct { + creds map[string]userpass +} + +func (c credentials) Basic(u *url.URL) (string, string) { + up := c.creds[u.String()] + + return up.username, up.password +} + +// ConfigureAuth authorizes with the upstream registry +func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { + if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { + return nil, err + } + + creds := map[string]userpass{ + tokenURL: { + username: username, + password: password, + }, + } + return credentials{creds: creds}, nil +} + +func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { + resp, err := http.Get(endpoint) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := manager.AddResponse(resp); err != nil { + return err + } + + return nil +} diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go new file mode 100644 index 00000000..b480a111 --- /dev/null +++ b/docs/proxy/proxyblobstore.go @@ -0,0 +1,214 @@ +package proxy + +import ( + "io" + "net/http" + "strconv" + "sync" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config file +const blobTTL = time.Duration(24 * 7 * time.Hour) + +type proxyBlobStore struct { + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler +} + +var _ distribution.BlobStore = proxyBlobStore{} + +type inflightBlob struct { + refCount int + bw distribution.BlobWriter +} + +// inflight tracks currently downloading blobs +var inflight = make(map[digest.Digest]*inflightBlob) + +// mu protects inflight +var mu sync.Mutex + +func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { + w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) + w.Header().Set("Content-Type", mediaType) + w.Header().Set("Docker-Content-Digest", digest.String()) + w.Header().Set("Etag", digest.String()) +} + +func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + desc, err := pbs.localStore.Stat(ctx, dgst) + if err != nil && err != distribution.ErrBlobUnknown { + return err + } + + if err == nil { + proxyMetrics.BlobPush(uint64(desc.Size)) + return pbs.localStore.ServeBlob(ctx, w, r, dgst) + } + + desc, err = pbs.remoteStore.Stat(ctx, dgst) + if err != nil { + return err + } + + remoteReader, err := pbs.remoteStore.Open(ctx, dgst) + if err 
!= nil {
+		return err
+	}
+
+	bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc)
+	if err != nil {
+		return err
+	}
+	defer cleanup()
+
+	if isNew {
+		go func() {
+			err := streamToStorage(ctx, remoteReader, desc, bw)
+			if err != nil {
+				context.GetLogger(ctx).Error(err)
+			}
+
+			proxyMetrics.BlobPull(uint64(desc.Size))
+		}()
+		err := streamToClient(ctx, w, desc, bw)
+		if err != nil {
+			return err
+		}
+
+		proxyMetrics.BlobPush(uint64(desc.Size))
+		pbs.scheduler.AddBlob(dgst.String(), blobTTL)
+		return nil
+	}
+
+	err = streamToClient(ctx, w, desc, bw)
+	if err != nil {
+		return err
+	}
+	proxyMetrics.BlobPush(uint64(desc.Size))
+	return nil
+}
+
+type cleanupFunc func()
+
+// getOrCreateBlobWriter tracks which blobs are currently being downloaded and
+// enables clients requesting the same blob concurrently to read from the
+// existing stream.
+func getOrCreateBlobWriter(ctx context.Context, blobs distribution.BlobService, desc distribution.Descriptor) (distribution.BlobWriter, bool, cleanupFunc, error) {
+	mu.Lock()
+	defer mu.Unlock()
+	dgst := desc.Digest
+
+	cleanup := func() {
+		mu.Lock()
+		defer mu.Unlock()
+		inflight[dgst].refCount--
+
+		if inflight[dgst].refCount == 0 {
+			defer delete(inflight, dgst)
+			_, err := inflight[dgst].bw.Commit(ctx, desc)
+			if err != nil {
+				// There is a narrow race here where Commit can be called while
+				// this blob's TTL is expiring and it's being removed from
+				// storage. In that case, the client stream will continue
+				// uninterrupted and the blob will be pulled through on the
+				// next request, so just log it.
+				context.GetLogger(ctx).Errorf("Error committing blob: %q", err)
+			}
+		}
+	}
+
+	var bw distribution.BlobWriter
+	_, ok := inflight[dgst]
+	if ok {
+		bw = inflight[dgst].bw
+		inflight[dgst].refCount++
+		return bw, false, cleanup, nil
+	}
+
+	var err error
+	bw, err = blobs.Create(ctx)
+	if err != nil {
+		return nil, false, nil, err
+	}
+
+	inflight[dgst] = &inflightBlob{refCount: 1, bw: bw}
+	return bw, true, cleanup, nil
+}
+
+func streamToStorage(ctx context.Context, remoteReader distribution.ReadSeekCloser, desc distribution.Descriptor, bw distribution.BlobWriter) error {
+	_, err := io.CopyN(bw, remoteReader, desc.Size)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func streamToClient(ctx context.Context, w http.ResponseWriter, desc distribution.Descriptor, bw distribution.BlobWriter) error {
+	setResponseHeaders(w, desc.Size, desc.MediaType, desc.Digest)
+
+	reader, err := bw.Reader()
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+	teeReader := io.TeeReader(reader, w)
+	buf := make([]byte, 32768)
+	var soFar int64
+	for {
+		rd, err := teeReader.Read(buf)
+		if err == nil || err == io.EOF {
+			soFar += int64(rd)
+			if soFar < desc.Size {
+				// short read; keep copying until the full blob has been streamed
+				continue
+			}
+			return nil
+		}
+		return err
+	}
+}
+
+func (pbs proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	desc, err := pbs.localStore.Stat(ctx, dgst)
+	if err == nil {
+		return desc, err
+	}
+
+	if err != distribution.ErrBlobUnknown {
+		return distribution.Descriptor{}, err
+	}
+
+	return pbs.remoteStore.Stat(ctx, dgst)
+}
+
+// Unsupported functions
+func (pbs proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	return distribution.Descriptor{}, distribution.ErrUnsupported
+}
+
+func (pbs proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) {
+	return nil, distribution.ErrUnsupported
+}
+
+func (pbs 
proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + return nil, distribution.ErrUnsupported +} + +func (pbs proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + return distribution.ErrUnsupported +} diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go new file mode 100644 index 00000000..65d5f922 --- /dev/null +++ b/docs/proxy/proxyblobstore_test.go @@ -0,0 +1,231 @@ +package proxy + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type statsBlobStore struct { + stats map[string]int + blobs distribution.BlobStore +} + +func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbs.stats["put"]++ + return sbs.blobs.Put(ctx, mediaType, p) +} + +func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbs.stats["get"]++ + return sbs.blobs.Get(ctx, dgst) +} + +func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + sbs.stats["create"]++ + return sbs.blobs.Create(ctx) +} + +func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbs.stats["resume"]++ + return sbs.blobs.Resume(ctx, id) +} + +func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbs.stats["open"]++ + return sbs.blobs.Open(ctx, dgst) +} + +func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbs.stats["serveblob"]++ + return sbs.blobs.ServeBlob(ctx, w, r, dgst) +} + +func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + sbs.stats["stat"]++ + return sbs.blobs.Stat(ctx, dgst) +} + +func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbs.stats["delete"]++ + return sbs.blobs.Delete(ctx, dgst) +} + +type testEnv struct { + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context +} + +func (te testEnv) LocalStats() *map[string]int { + ls := te.store.localStore.(statsBlobStore).stats + return &ls +} + +func (te testEnv) RemoteStats() *map[string]int { + rs := te.store.remoteStore.(statsBlobStore).stats + return &rs +} + +// Populate remote store and record the digests +func makeTestEnv(t *testing.T, name string) testEnv { + ctx := context.Background() + + localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true) + localRepo, err := localRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false) + truthRepo, 
err := truthRegistry.Repository(ctx, name) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + truthBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: truthRepo.Blobs(ctx), + } + + localBlobs := statsBlobStore{ + stats: make(map[string]int), + blobs: localRepo.Blobs(ctx), + } + + s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") + + proxyBlobStore := proxyBlobStore{ + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + } + + te := testEnv{ + store: proxyBlobStore, + ctx: ctx, + } + return te +} + +func populate(t *testing.T, te *testEnv, blobCount int) { + var inRemote []distribution.Descriptor + for i := 0; i < blobCount; i++ { + bytes := []byte(fmt.Sprintf("blob%d", i)) + + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Errorf("Put in store") + } + inRemote = append(inRemote, desc) + } + + te.inRemote = inRemote + +} + +func TestProxyStoreStat(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 + populate(t, &te, remoteBlobCount) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Stat - touches both stores + for _, d := range te.inRemote { + _, err := te.store.Stat(te.ctx, d.Digest) + if err != nil { + t.Fatalf("Error stating proxy store") + } + } + + if (*localStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected local stat count") + } + + if (*remoteStats)["stat"] != remoteBlobCount { + t.Errorf("Unexpected remote stat count") + } +} + +func TestProxyStoreServe(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 + populate(t, &te, remoteBlobCount) + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + // Serveblob - pulls through blobs + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl, err := digest.FromBytes(w.Body.Bytes()) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + if (*localStats)["stat"] != remoteBlobCount && (*localStats)["create"] != remoteBlobCount { + t.Fatalf("unexpected local stats") + } + if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { + t.Fatalf("unexpected local stats") + } + + // Serveblob - blobs come from local + for _, dr := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + dl, err := digest.FromBytes(w.Body.Bytes()) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if dl != dr.Digest { + t.Errorf("Mismatching blob fetch from proxy") + } + } + + // Stat to find local, but no new blobs were created + if (*localStats)["stat"] != remoteBlobCount*2 && (*localStats)["create"] != remoteBlobCount*2 { + t.Fatalf("unexpected local stats") + } + + // Remote unchanged + if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { + fmt.Printf("\tlocal=%#v, \n\tremote=%#v\n", localStats, remoteStats) + t.Fatalf("unexpected local stats") + } + +} diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go new file mode 100644 index 00000000..5b79c8ce --- /dev/null +++ 
b/docs/proxy/proxymanifeststore.go @@ -0,0 +1,155 @@ +package proxy + +import ( + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/proxy/scheduler" +) + +// todo(richardscothern): from cache control header or config +const repositoryTTL = time.Duration(24 * 7 * time.Hour) + +type proxyManifestStore struct { + ctx context.Context + localManifests distribution.ManifestService + remoteManifests distribution.ManifestService + repositoryName string + scheduler *scheduler.TTLExpirationScheduler +} + +var _ distribution.ManifestService = &proxyManifestStore{} + +func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(dgst) + if err != nil { + return false, err + } + if exists { + return true, nil + } + + return pms.remoteManifests.Exists(dgst) +} + +func (pms proxyManifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + sm, err := pms.localManifests.Get(dgst) + if err == nil { + proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + return sm, err + } + + sm, err = pms.remoteManifests.Get(dgst) + if err != nil { + return nil, err + } + + proxyMetrics.ManifestPull(uint64(len(sm.Raw))) + err = pms.localManifests.Put(sm) + if err != nil { + return nil, err + } + + // Schedule the repo for removal + pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + + // Ensure the manifest blob is cleaned up + pms.scheduler.AddBlob(dgst.String(), repositoryTTL) + + proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + + return sm, err +} + +func (pms proxyManifestStore) Tags() ([]string, error) { + return pms.localManifests.Tags() +} + +func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) { + exists, err := pms.localManifests.ExistsByTag(tag) + if err != nil { + return false, err + } + if exists { + return true, nil + } + + return pms.remoteManifests.ExistsByTag(tag) +} + +func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + var localDigest digest.Digest + + localManifest, err := pms.localManifests.GetByTag(tag, options...) 
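+	// The switch below implements a conditional pull-through: a local miss
+	// jumps straight to the remote fetch, while a local hit records its
+	// digest so the remote request can carry an ETag and come back empty
+	// when the cached copy is still current.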
+ switch err.(type) { + case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision: + goto fromremote + case nil: + break + default: + return nil, err + } + + localDigest, err = manifestDigest(localManifest) + if err != nil { + return nil, err + } + +fromremote: + var sm *manifest.SignedManifest + sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) + if err != nil { + return nil, err + } + + if sm == nil { + context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String()) + return localManifest, nil + } + context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String()) + + err = pms.localManifests.Put(sm) + if err != nil { + return nil, err + } + + dgst, err := manifestDigest(sm) + if err != nil { + return nil, err + } + pms.scheduler.AddBlob(dgst.String(), repositoryTTL) + pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + + proxyMetrics.ManifestPull(uint64(len(sm.Raw))) + proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + + return sm, err +} + +func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { + payload, err := sm.Payload() + if err != nil { + return "", err + + } + + dgst, err := digest.FromBytes(payload) + if err != nil { + return "", err + } + + return dgst, nil +} + +func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error { + return v2.ErrorCodeUnsupported +} + +func (pms proxyManifestStore) Delete(dgst digest.Digest) error { + return v2.ErrorCodeUnsupported +} diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go new file mode 100644 index 00000000..7b9b8091 --- /dev/null +++ b/docs/proxy/proxymanifeststore_test.go @@ -0,0 +1,235 @@ +package proxy + +import ( + "io" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" +) + +type statsManifest struct { + manifests distribution.ManifestService + stats map[string]int +} + +type manifestStoreTestEnv struct { + manifestDigest digest.Digest // digest of the signed manifest in the local storage + manifests proxyManifestStore +} + +func (te manifestStoreTestEnv) LocalStats() *map[string]int { + ls := te.manifests.localManifests.(statsManifest).stats + return &ls +} + +func (te manifestStoreTestEnv) RemoteStats() *map[string]int { + rs := te.manifests.remoteManifests.(statsManifest).stats + return &rs +} + +func (sm statsManifest) Delete(dgst digest.Digest) error { + sm.stats["delete"]++ + return sm.manifests.Delete(dgst) +} + +func (sm statsManifest) Exists(dgst digest.Digest) (bool, error) { + sm.stats["exists"]++ + return sm.manifests.Exists(dgst) +} + +func (sm statsManifest) ExistsByTag(tag string) (bool, error) { + sm.stats["existbytag"]++ + return sm.manifests.ExistsByTag(tag) +} + +func (sm statsManifest) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { + sm.stats["get"]++ + return sm.manifests.Get(dgst) +} + +func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { + sm.stats["getbytag"]++ + return 
sm.manifests.GetByTag(tag, options...)
+}
+
+func (sm statsManifest) Put(manifest *manifest.SignedManifest) error {
+	sm.stats["put"]++
+	return sm.manifests.Put(manifest)
+}
+
+func (sm statsManifest) Tags() ([]string, error) {
+	sm.stats["tags"]++
+	return sm.manifests.Tags()
+}
+
+func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv {
+	ctx := context.Background()
+	truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false)
+	truthRepo, err := truthRegistry.Repository(ctx, name)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	tr, err := truthRepo.Manifests(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	truthManifests := statsManifest{
+		manifests: tr,
+		stats:     make(map[string]int),
+	}
+
+	manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag)
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+
+	localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true)
+	localRepo, err := localRegistry.Repository(ctx, name)
+	if err != nil {
+		t.Fatalf("unexpected error getting repo: %v", err)
+	}
+	lr, err := localRepo.Manifests(ctx)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	localManifests := statsManifest{
+		manifests: lr,
+		stats:     make(map[string]int),
+	}
+
+	s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json")
+	return &manifestStoreTestEnv{
+		manifestDigest: manifestDigest,
+		manifests: proxyManifestStore{
+			ctx:             ctx,
+			localManifests:  localManifests,
+			remoteManifests: truthManifests,
+			scheduler:       s,
+		},
+	}
+}
+
+func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
+	m := manifest.Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name: name,
+		Tag:  tag,
+	}
+
+	for i := 0; i < 2; i++ {
+		wr, err := repository.Blobs(ctx).Create(ctx)
+		if err != nil {
+			t.Fatalf("unexpected error creating test upload: %v", err)
+		}
+
+		rs, ts, err := testutil.CreateRandomTarFile()
+		if err != nil {
+			t.Fatalf("unexpected error generating test layer file")
+		}
+		dgst := digest.Digest(ts)
+		if _, err := io.Copy(wr, rs); err != nil {
+			t.Fatalf("unexpected error copying to upload: %v", err)
+		}
+
+		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
+			t.Fatalf("unexpected error finishing upload: %v", err)
+		}
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	sm, err := manifest.Sign(&m, pk)
+	if err != nil {
+		t.Fatalf("error signing manifest: %v", err)
+	}
+
+	ms, err := repository.Manifests(ctx)
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+	if err := ms.Put(sm); err != nil {
+		t.Fatalf("unexpected error putting manifest: %v", err)
+	}
+	pl, err := sm.Payload()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return digest.FromBytes(pl)
+}
+
+// TestProxyManifests contains basic acceptance tests
+// for the pull-through behavior
+func TestProxyManifests(t *testing.T) {
+	name := "foo/bar"
+	env := newManifestStoreTestEnv(t, name, "latest")
+
+	localStats := env.LocalStats()
+	remoteStats := env.RemoteStats()
+
+	// Stat - must check local and remote
+	exists, err := env.manifests.ExistsByTag("latest")
+	if err != nil {
+		t.Fatalf("Error checking existence")
+	}
+	if !exists {
+		t.Errorf("Unexpected non-existent manifest")
+	}
+
+	if (*localStats)["existbytag"] != 1 
&& (*remoteStats)["existbytag"] != 1 {
+		t.Errorf("Unexpected exists count")
+	}
+
+	// Get - should succeed and pull manifest into local
+	_, err = env.manifests.Get(env.manifestDigest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 {
+		t.Errorf("Unexpected get count")
+	}
+
+	if (*localStats)["put"] != 1 {
+		t.Errorf("Expected local put")
+	}
+
+	// Stat - should only go to local
+	exists, err = env.manifests.ExistsByTag("latest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("Unexpected non-existent manifest")
+	}
+
+	if (*localStats)["existbytag"] != 2 && (*remoteStats)["existbytag"] != 1 {
+		t.Errorf("Unexpected exists count")
+	}
+
+	// Get - should get from remote, to test freshness
+	_, err = env.manifests.Get(env.manifestDigest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if (*remoteStats)["get"] != 2 && (*remoteStats)["existbytag"] != 1 && (*localStats)["put"] != 1 {
+		t.Errorf("Unexpected get count")
+	}
+}
diff --git a/docs/proxy/proxymetrics.go b/docs/proxy/proxymetrics.go
new file mode 100644
index 00000000..d3d84d78
--- /dev/null
+++ b/docs/proxy/proxymetrics.go
@@ -0,0 +1,74 @@
+package proxy
+
+import (
+	"expvar"
+	"sync/atomic"
+)
+
+// Metrics is used to hold metric counters
+// related to the proxy
+type Metrics struct {
+	Requests    uint64
+	Hits        uint64
+	Misses      uint64
+	BytesPulled uint64
+	BytesPushed uint64
+}
+
+type proxyMetricsCollector struct {
+	blobMetrics     Metrics
+	manifestMetrics Metrics
+}
+
+// BlobPull tracks metrics about blobs pulled into the cache
+func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) {
+	atomic.AddUint64(&pmc.blobMetrics.Misses, 1)
+	atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled)
+}
+
+// BlobPush tracks metrics about blobs pushed to clients
+func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) {
+	atomic.AddUint64(&pmc.blobMetrics.Requests, 1)
+	atomic.AddUint64(&pmc.blobMetrics.Hits, 1)
+	atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed)
+}
+
+// ManifestPull tracks metrics related to Manifests pulled into the cache
+func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) {
+	atomic.AddUint64(&pmc.manifestMetrics.Misses, 1)
+	atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled)
+}
+
+// ManifestPush tracks metrics about manifests pushed to clients
+func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) {
+	atomic.AddUint64(&pmc.manifestMetrics.Requests, 1)
+	atomic.AddUint64(&pmc.manifestMetrics.Hits, 1)
+	atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed)
+}
+
+// proxyMetrics tracks metrics about the proxy cache. This is
+// kept globally and made available via expvar. 
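+// A scrape of expvar's /debug/vars endpoint therefore exposes both Metrics
+// structs, e.g. {"Requests": 2, "Hits": 1, "Misses": 1, ...} for blobs, from
+// which a cache hit rate of Hits/Requests can be derived (illustrative shape,
+// not a stable API).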
+var proxyMetrics = &proxyMetricsCollector{} + +func init() { + registry := expvar.Get("registry") + if registry == nil { + registry = expvar.NewMap("registry") + } + + pm := registry.(*expvar.Map).Get("proxy") + if pm == nil { + pm = &expvar.Map{} + pm.(*expvar.Map).Init() + registry.(*expvar.Map).Set("proxy", pm) + } + + pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { + return proxyMetrics.blobMetrics + })) + + pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { + return proxyMetrics.manifestMetrics + })) + +} diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go new file mode 100644 index 00000000..e9dec2f7 --- /dev/null +++ b/docs/proxy/proxyregistry.go @@ -0,0 +1,139 @@ +package proxy + +import ( + "net/http" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/proxy/scheduler" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" +) + +// proxyingRegistry fetches content from a remote registry and caches it locally +type proxyingRegistry struct { + embedded distribution.Namespace // provides local registry functionality + + scheduler *scheduler.TTLExpirationScheduler + + remoteURL string + credentialStore auth.CredentialStore + challengeManager auth.ChallengeManager +} + +// NewRegistryPullThroughCache creates a registry acting as a pull through cache +func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { + _, err := url.Parse(config.RemoteURL) + if err != nil { + return nil, err + } + + v := storage.NewVacuum(ctx, driver) + + s := scheduler.New(ctx, driver, "/scheduler-state.json") + s.OnBlobExpire(func(digest string) error { + return v.RemoveBlob(digest) + }) + s.OnManifestExpire(func(repoName string) error { + return v.RemoveRepository(repoName) + }) + err = s.Start() + if err != nil { + return nil, err + } + + challengeManager := auth.NewSimpleChallengeManager() + cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) + if err != nil { + return nil, err + } + + return &proxyingRegistry{ + embedded: registry, + scheduler: s, + challengeManager: challengeManager, + credentialStore: cs, + remoteURL: config.RemoteURL, + }, nil +} + +func (pr *proxyingRegistry) Scope() distribution.Scope { + return distribution.GlobalScope +} + +func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { + return pr.embedded.Repositories(ctx, repos, last) +} + +func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) { + tr := transport.NewTransport(http.DefaultTransport, + auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull"))) + + localRepo, err := pr.embedded.Repository(ctx, name) + if err != nil { + return nil, err + } + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification) + if err != nil { + return nil, err + } + + remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) + if err != nil { + return nil, err + } + + remoteManifests, err := 
remoteRepo.Manifests(ctx) + if err != nil { + return nil, err + } + + return &proxiedRepository{ + blobStore: proxyBlobStore{ + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + }, + manifests: proxyManifestStore{ + repositoryName: name, + localManifests: localManifests, // Options? + remoteManifests: remoteManifests, + ctx: ctx, + scheduler: pr.scheduler, + }, + name: name, + signatures: localRepo.Signatures(), + }, nil +} + +// proxiedRepository uses proxying blob and manifest services to serve content +// locally, or pulling it through from a remote and caching it locally if it doesn't +// already exist +type proxiedRepository struct { + blobStore distribution.BlobStore + manifests distribution.ManifestService + name string + signatures distribution.SignatureService +} + +func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // options + return pr.manifests, nil +} + +func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { + return pr.blobStore +} + +func (pr *proxiedRepository) Name() string { + return pr.name +} + +func (pr *proxiedRepository) Signatures() distribution.SignatureService { + return pr.signatures +} diff --git a/docs/proxy/scheduler/scheduler.go b/docs/proxy/scheduler/scheduler.go new file mode 100644 index 00000000..056b148a --- /dev/null +++ b/docs/proxy/scheduler/scheduler.go @@ -0,0 +1,250 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver" +) + +// onTTLExpiryFunc is called when a repositories' TTL expires +type expiryFunc func(string) error + +const ( + entryTypeBlob = iota + entryTypeManifest +) + +// schedulerEntry represents an entry in the scheduler +// fields are exported for serialization +type schedulerEntry struct { + Key string `json:"Key"` + Expiry time.Time `json:"ExpiryData"` + EntryType int `json:"EntryType"` +} + +// New returns a new instance of the scheduler +func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { + return &TTLExpirationScheduler{ + entries: make(map[string]schedulerEntry), + addChan: make(chan schedulerEntry), + stopChan: make(chan bool), + driver: driver, + pathToStateFile: path, + ctx: ctx, + stopped: true, + } +} + +// TTLExpirationScheduler is a scheduler used to perform actions +// when TTLs expire +type TTLExpirationScheduler struct { + entries map[string]schedulerEntry + addChan chan schedulerEntry + stopChan chan bool + + driver driver.StorageDriver + ctx context.Context + pathToStateFile string + + stopped bool + + onBlobExpire expiryFunc + onManifestExpire expiryFunc +} + +// addChan allows more TTLs to be pushed to the scheduler +type addChan chan schedulerEntry + +// stopChan allows the scheduler to be stopped - used for testing. 
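+// Neither of these named types is wired into TTLExpirationScheduler, whose
+// addChan and stopChan fields use the raw channel types directly; they exist
+// to document the channel roles.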
+type stopChan chan bool + +// OnBlobExpire is called when a scheduled blob's TTL expires +func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.onBlobExpire = f +} + +// OnManifestExpire is called when a scheduled manifest's TTL expires +func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.onManifestExpire = f +} + +// AddBlob schedules a blob cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + ttles.add(dgst, ttl, entryTypeBlob) + return nil +} + +// AddManifest schedules a manifest cleanup after ttl expires +func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { + if ttles.stopped { + return fmt.Errorf("scheduler not started") + } + + ttles.add(repoName, ttl, entryTypeManifest) + return nil +} + +// Start starts the scheduler +func (ttles *TTLExpirationScheduler) Start() error { + return ttles.start() +} + +func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { + entry := schedulerEntry{ + Key: key, + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + ttles.addChan <- entry +} + +func (ttles *TTLExpirationScheduler) stop() { + ttles.stopChan <- true +} + +func (ttles *TTLExpirationScheduler) start() error { + err := ttles.readState() + if err != nil { + return err + } + + if !ttles.stopped { + return fmt.Errorf("Scheduler already started") + } + + context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + ttles.stopped = false + go ttles.mainloop() + + return nil +} + +// mainloop uses a select statement to listen for events. Most of its time +// is spent in waiting on a TTL to expire but can be interrupted when TTLs +// are added. 
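+// Every mutation of the entry map is followed by writeState, so a restarted
+// scheduler can pick up the remaining TTLs from the state file.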
+func (ttles *TTLExpirationScheduler) mainloop() { + for { + if ttles.stopped { + return + } + + nextEntry, ttl := nextExpiringEntry(ttles.entries) + if len(ttles.entries) == 0 { + context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Nothing to do, sleeping...") + } else { + context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Sleeping for %s until cleanup of %s", ttl, nextEntry.Key) + } + + select { + case <-time.After(ttl): + var f expiryFunc + + switch nextEntry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = func(repoName string) error { + return fmt.Errorf("Unexpected scheduler entry type") + } + } + + if err := f(nextEntry.Key); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", nextEntry.Key, err) + } + + delete(ttles.entries, nextEntry.Key) + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + case entry := <-ttles.addChan: + context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + ttles.entries[entry.Key] = entry + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + break + + case <-ttles.stopChan: + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + ttles.stopped = true + } + } +} + +func nextExpiringEntry(entries map[string]schedulerEntry) (*schedulerEntry, time.Duration) { + if len(entries) == 0 { + return nil, 24 * time.Hour + } + + // todo:(richardscothern) this is a primitive o(n) algorithm + // but n will never be *that* big and it's all in memory. 
Investigate + // time.AfterFunc for heap based expiries + + first := true + var nextEntry schedulerEntry + for _, entry := range entries { + if first { + nextEntry = entry + first = false + continue + } + if entry.Expiry.Before(nextEntry.Expiry) { + nextEntry = entry + } + } + + // Dates may be from the past if the scheduler has + // been restarted, set their ttl to 0 + if nextEntry.Expiry.Before(time.Now()) { + nextEntry.Expiry = time.Now() + return &nextEntry, 0 + } + + return &nextEntry, nextEntry.Expiry.Sub(time.Now()) +} + +func (ttles *TTLExpirationScheduler) writeState() error { + jsonBytes, err := json.Marshal(ttles.entries) + if err != nil { + return err + } + + err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) + if err != nil { + return err + } + return nil +} + +func (ttles *TTLExpirationScheduler) readState() error { + if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { + switch err := err.(type) { + case driver.PathNotFoundError: + return nil + default: + return err + } + } + + bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) + if err != nil { + return err + } + + err = json.Unmarshal(bytes, &ttles.entries) + if err != nil { + return err + } + + return nil +} diff --git a/docs/proxy/scheduler/scheduler_test.go b/docs/proxy/scheduler/scheduler_test.go new file mode 100644 index 00000000..fb5479f0 --- /dev/null +++ b/docs/proxy/scheduler/scheduler_test.go @@ -0,0 +1,165 @@ +package scheduler + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestSchedule(t *testing.T) { + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + "testBlob1": true, + "testBlob2": true, + "ch00": true, + } + + s := New(context.Background(), inmemory.New(), "/ttl") + deleteFunc := func(repoName string) error { + if len(remainingRepos) == 0 { + t.Fatalf("Incorrect expiry count") + } + _, ok := remainingRepos[repoName] + if !ok { + t.Fatalf("Trying to remove nonexistant repo: %s", repoName) + } + fmt.Println("removing", repoName) + delete(remainingRepos, repoName) + + return nil + } + s.onBlobExpire = deleteFunc + err := s.start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + + s.add("testBlob1", 3*timeUnit, entryTypeBlob) + s.add("testBlob2", 1*timeUnit, entryTypeBlob) + + func() { + s.add("ch00", 1*timeUnit, entryTypeBlob) + + }() + + // Ensure all repos are deleted + <-time.After(50 * timeUnit) + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestRestoreOld(t *testing.T) { + remainingRepos := map[string]bool{ + "testBlob1": true, + "oldRepo": true, + } + + deleteFunc := func(repoName string) error { + if repoName == "oldRepo" && len(remainingRepos) == 3 { + t.Errorf("oldRepo should be removed first") + } + _, ok := remainingRepos[repoName] + if !ok { + t.Fatalf("Trying to remove nonexistant repo: %s", repoName) + } + delete(remainingRepos, repoName) + return nil + } + + timeUnit := time.Millisecond + serialized, err := json.Marshal(&map[string]schedulerEntry{ + "testBlob1": { + Expiry: time.Now().Add(1 * timeUnit), + Key: "testBlob1", + EntryType: 0, + }, + "oldRepo": { + Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first + Key: "oldRepo", + EntryType: 0, + }, + }) + if err != nil { + t.Fatalf("Error serializing test data: %s", err.Error()) + } + + ctx := 
context.Background() + pathToStatFile := "/ttl" + fs := inmemory.New() + err = fs.PutContent(ctx, pathToStatFile, serialized) + if err != nil { + t.Fatal("Unable to write serialized data to fs") + } + s := New(context.Background(), fs, "/ttl") + s.onBlobExpire = deleteFunc + err = s.start() + if err != nil { + t.Fatalf("Error starting ttlExpirationScheduler: %s", err) + } + + <-time.After(50 * timeUnit) + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } +} + +func TestStopRestore(t *testing.T) { + timeUnit := time.Millisecond + remainingRepos := map[string]bool{ + "testBlob1": true, + "testBlob2": true, + } + deleteFunc := func(repoName string) error { + delete(remainingRepos, repoName) + return nil + } + + fs := inmemory.New() + pathToStateFile := "/ttl" + s := New(context.Background(), fs, pathToStateFile) + s.onBlobExpire = deleteFunc + + err := s.start() + if err != nil { + t.Fatalf(err.Error()) + } + s.add("testBlob1", 300*timeUnit, entryTypeBlob) + s.add("testBlob2", 100*timeUnit, entryTypeBlob) + + // Start and stop before all operations complete + // state will be written to fs + s.stop() + time.Sleep(10 * time.Millisecond) + + // v2 will restore state from fs + s2 := New(context.Background(), fs, pathToStateFile) + s2.onBlobExpire = deleteFunc + err = s2.start() + if err != nil { + t.Fatalf("Error starting v2: %s", err.Error()) + } + + <-time.After(500 * timeUnit) + if len(remainingRepos) != 0 { + t.Fatalf("Repositories remaining: %#v", remainingRepos) + } + +} + +func TestDoubleStart(t *testing.T) { + s := New(context.Background(), inmemory.New(), "/ttl") + err := s.start() + if err != nil { + t.Fatalf("Unable to start scheduler") + } + fmt.Printf("%#v", s) + err = s.start() + if err == nil { + t.Fatalf("Scheduler started twice without error") + } +} diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 7719bab1..a0020ed8 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -33,7 +33,7 @@ func TestSimpleBlobUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -193,7 +193,7 @@ func TestSimpleBlobUpload(t *testing.T) { } // Reuse state to test delete with a delete-disabled registry - registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true) + registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) repository, err = registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -212,7 +212,7 @@ func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -316,7 +316,7 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := 
"foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 50da7699..2142c37f 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -31,6 +31,8 @@ type blobWriter struct { // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy // LayerUpload Interface bufferedFileWriter + + resumableDigestEnabled bool } var _ distribution.BlobWriter = &blobWriter{} @@ -349,3 +351,29 @@ func (bw *blobWriter) removeResources(ctx context.Context) error { return nil } + +func (bw *blobWriter) Reader() (io.ReadCloser, error) { + // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 + try := 1 + for try <= 5 { + _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) + if err == nil { + break + } + switch err.(type) { + case storagedriver.PathNotFoundError: + context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + time.Sleep(1 * time.Second) + try++ + default: + return nil, err + } + } + + readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) + if err != nil { + return nil, err + } + + return readCloser, nil +} diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go index c2ab2123..a26ac2cc 100644 --- a/docs/storage/blobwriter_resumable.go +++ b/docs/storage/blobwriter_resumable.go @@ -24,6 +24,10 @@ import ( // offset. Any unhashed bytes remaining less than the given offset are hashed // from the content uploaded so far. func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + if offset < 0 { return fmt.Errorf("cannot resume hash at negative offset: %d", offset) } @@ -143,6 +147,10 @@ func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry } func (bw *blobWriter) storeHashState(ctx context.Context) error { + if !bw.resumableDigestEnabled { + return errResumableDigestNotAvailable + } + h, ok := bw.digester.Hash().(resumable.Hash) if !ok { return errResumableDigestNotAvailable diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go index 862777aa..1a1dbac5 100644 --- a/docs/storage/catalog_test.go +++ b/docs/storage/catalog_test.go @@ -22,7 +22,7 @@ func setupFS(t *testing.T) *setupEnv { d := inmemory.New() c := []byte("") ctx := context.Background() - registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true) + registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) repos := []string{ diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index e7a98bbb..2ba62a95 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -16,11 +16,12 @@ import ( // that grant access to the global blob store. 
type linkedBlobStore struct { *blobStore - blobServer distribution.BlobServer - blobAccessController distribution.BlobDescriptorService - repository distribution.Repository - ctx context.Context // only to be used where context can't come through method args - deleteEnabled bool + blobServer distribution.BlobServer + blobAccessController distribution.BlobDescriptorService + repository distribution.Repository + ctx context.Context // only to be used where context can't come through method args + deleteEnabled bool + resumableDigestEnabled bool // linkPath allows one to control the repository blob link set to which // the blob store dispatches. This is required because manifest and layer @@ -189,11 +190,12 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string } bw := &blobWriter{ - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.New(), - bufferedFileWriter: *fw, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + bufferedFileWriter: *fw, + resumableDigestEnabled: lbs.resumableDigestEnabled, } return bw, nil diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 5bbbd4a2..a4ce9149 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -29,7 +29,7 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true) + registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) repo, err := registry.Repository(ctx, name) if err != nil { @@ -348,7 +348,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest get returned non-nil") } - r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true) + r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) repo, err := r.Repository(ctx, env.name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 8149be11..c5058b80 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -16,6 +16,7 @@ type registry struct { statter distribution.BlobStatter // global statter service. blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool + resumableDigestEnabled bool } // NewRegistryWithDriver creates a new registry instance from the provided @@ -23,9 +24,9 @@ type registry struct { // cheap to allocate. If redirect is true, the backend blob server will // attempt to use (StorageDriver).URLFor to serve all blobs. // -// TODO(stevvooe): This function signature is getting out of hand. Move to +// TODO(stevvooe): This function signature is getting very out of hand. Move to // functional options for instance configuration. -func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool) distribution.Namespace { +func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool, isCache bool) distribution.Namespace { // create global statter, with cache. 
var statter distribution.BlobDescriptorService = &blobStatter{ driver: driver, @@ -52,6 +53,7 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv }, blobDescriptorCacheProvider: blobDescriptorCacheProvider, deleteEnabled: deleteEnabled, + resumableDigestEnabled: !isCache, } } diff --git a/docs/storage/vacuum.go b/docs/storage/vacuum.go new file mode 100644 index 00000000..46b8096b --- /dev/null +++ b/docs/storage/vacuum.go @@ -0,0 +1,67 @@ +package storage + +import ( + "path" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/driver" +) + +// vacuum contains functions for cleaning up repositories and blobs +// These functions will only reliably work on strongly consistent +// storage systems. +// https://en.wikipedia.org/wiki/Consistency_model + +// NewVacuum creates a new Vacuum +func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { + return Vacuum{ + ctx: ctx, + driver: driver, + pm: defaultPathMapper, + } +} + +// Vacuum removes content from the filesystem +type Vacuum struct { + pm *pathMapper + driver driver.StorageDriver + ctx context.Context +} + +// RemoveBlob removes a blob from the filesystem +func (v Vacuum) RemoveBlob(dgst string) error { + d, err := digest.ParseDigest(dgst) + if err != nil { + return err + } + + blobPath, err := v.pm.path(blobDataPathSpec{digest: d}) + if err != nil { + return err + } + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + err = v.driver.Delete(v.ctx, blobPath) + if err != nil { + return err + } + + return nil +} + +// RemoveRepository removes a repository directory from the +// filesystem +func (v Vacuum) RemoveRepository(repoName string) error { + rootForRepository, err := v.pm.path(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(rootForRepository, repoName) + context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + err = v.driver.Delete(v.ctx, repoDir) + if err != nil { + return err + } + + return nil +} From e7435725af6e12525d5e85a23302290453e9c35c Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 6 Aug 2015 15:14:44 -0700 Subject: [PATCH 259/501] Don't panic when a http.ResponseWriter does not implement CloseNotifier Instead, provide a variant of instrumentedResponseWriter that does not implement CloseNotifier, and use that when necessary. In copyFullPayload, log instead of panicing when we encounter something that doesn't implement CloseNotifier. This is more complicated than I'd like, but it's necessary because instrumentedResponseWriter must not embed CloseNotifier unless there's really a CloseNotifier to embed. Signed-off-by: Aaron Lehmann --- docs/handlers/helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index 1f9a8ee1..a4f3abcc 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -29,7 +29,7 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr if notifier, ok := responseWriter.(http.CloseNotifier); ok { clientClosed = notifier.CloseNotify() } else { - panic("the ResponseWriter does not implement CloseNotifier") + ctxu.GetLogger(context).Warn("the ResponseWriter does not implement CloseNotifier") } // Read in the data, if any. 
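The fallback above is safe because clientClosed stays nil when the writer
lacks CloseNotify, and a receive from a nil channel blocks forever, so a
select that includes that case simply never fires it. A minimal sketch of
the same guard, assuming only the standard library (the handler name and the
stand-in work below are illustrative, not part of this patch):

package handlers

import (
	"log"
	"net/http"
	"time"
)

// serveWithCancel mirrors copyFullPayload's guard: take the CloseNotify
// channel when the ResponseWriter offers one; otherwise log and leave the
// channel nil, which makes its select case inert.
func serveWithCancel(w http.ResponseWriter, r *http.Request) {
	var clientClosed <-chan bool
	if notifier, ok := w.(http.CloseNotifier); ok {
		clientClosed = notifier.CloseNotify()
	} else {
		log.Println("ResponseWriter does not implement CloseNotifier")
	}

	done := make(chan struct{})
	go func() {
		// Stand-in for the payload copy performed by copyFullPayload.
		time.Sleep(10 * time.Millisecond)
		close(done)
	}()

	select {
	case <-clientClosed:
		log.Println("client disconnected; abandoning copy")
	case <-done:
		w.WriteHeader(http.StatusOK)
	}
}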
From 9bf231e0fa158d2c7ae11a2636a97a7d1b5c6a30 Mon Sep 17 00:00:00 2001
From: Vincent Giersch
Date: Mon, 10 Aug 2015 23:11:17 +0200
Subject: [PATCH 260/501] fix(rados): Create OMAP for root directory

When using the RADOS driver, the hierarchy of the files is stored in OMAPs,
but the root OMAP was not created and a call to List("/") was returning an
error instead of returning the first-level files stored. This patch creates
an OMAP for "/" and excludes the listed directory from the list of files
returned.

Signed-off-by: Vincent Giersch
---
 docs/storage/driver/rados/rados.go           | 8 +++++---
 docs/storage/driver/testsuites/testsuites.go | 8 ++++++++
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go
index 0ea10a89..b2e6590d 100644
--- a/docs/storage/driver/rados/rados.go
+++ b/docs/storage/driver/rados/rados.go
@@ -409,7 +409,9 @@ func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) {
 	keys := make([]string, 0, len(files))
 	for k := range files {
-		keys = append(keys, path.Join(dirPath, k))
+		if k != dirPath {
+			keys = append(keys, path.Join(dirPath, k))
+		}
 	}
 	return keys, nil
@@ -528,7 +530,7 @@ func (d *driver) putOid(objectPath string, oid string) error {
 	}
 	// Ensure parent virtual directories
-	if createParentReference && directory != "/" {
+	if createParentReference {
 		return d.putOid(directory, "")
 	}
@@ -581,7 +583,7 @@ func (d *driver) deleteOid(objectPath string) error {
 		}
 		// Remove reference on parent omaps
-		if directory != "/" {
+		if directory != "" {
 			return d.deleteOid(directory)
 		}
 	}
diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
index 770c428c..1772560b 100644
--- a/docs/storage/driver/testsuites/testsuites.go
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -87,6 +87,14 @@ func (suite *DriverSuite) TearDownTest(c *check.C) {
 	}
 }
+// TestRootExists ensures that all storage drivers have a root path by default.
+func (suite *DriverSuite) TestRootExists(c *check.C) {
+	_, err := suite.StorageDriver.List(suite.ctx, "/")
+	if err != nil {
+		c.Fatalf(`the root path "/" should always exist: %v`, err)
+	}
+}
+
 // TestValidPaths checks that various valid file paths are accepted by the
 // storage driver.
 func (suite *DriverSuite) TestValidPaths(c *check.C) {

From 288c46e99899ba0e3a8851f5c02a6660eb63ef17 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 6 Aug 2015 15:28:11 -0700
Subject: [PATCH 261/501] Provide simple storage driver health check

To ensure the web application is operating properly, we've added a periodic
health check for the storage driver. If the health check fails three times
in a row, the registry will serve a 503 response status for any request
until the condition is resolved. The condition is reported in the response
body and via the /debug/health endpoint.

To ensure that all drivers will properly operate with this health check, a
function has been added to the driver testsuite. 
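For integrators, a rough sketch of how a process might opt in (the import
paths and config plumbing here are assumptions for illustration, not part
of this patch):

package main

import (
	"log"
	"net/http"

	"github.com/docker/distribution/configuration"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/handlers"
)

func main() {
	var config configuration.Configuration // normally parsed from the YAML config file
	app := handlers.NewApp(context.Background(), config)
	app.RegisterHealthChecks() // once per process, as the doc comment below warns

	// While the storage check has failed three times in a row, every request
	// is answered with a 503 and /debug/health reports the failing check.
	log.Fatal(http.ListenAndServe(":5000", app))
}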
Signed-off-by: Stephen J Day
---
 docs/handlers/app.go | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index f60290d0..ab33e8a6 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -14,6 +14,7 @@ import (
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/configuration"
 	ctxu "github.com/docker/distribution/context"
+	"github.com/docker/distribution/health"
 	"github.com/docker/distribution/notifications"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/api/v2"
@@ -203,6 +204,20 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App
 	return app
 }
+// RegisterHealthChecks is an awful hack to defer health check registration
+// control to callers. This should only ever be called once per registry
+// process, typically in a main function. The correct way would be to register
+// health checks outside of app, since multiple apps may exist in the same
+// process. Because the configuration and app are tightly coupled,
+// implementing this properly will require a refactor. This method may panic
+// if called twice in the same process.
+func (app *App) RegisterHealthChecks() {
+	health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), 10*time.Second, 3, func() error {
+		_, err := app.driver.List(app, "/") // "/" should always exist
+		return err                          // any error will be treated as failure
+	})
+}
+
 // register a handler with the application, by route name. The handler will be
 // passed through the application filters and context will be constructed at
 // request time.

From ed3ecfdccbe8030657d383a2bfad65cd25cd4419 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 6 Aug 2015 16:25:08 -0700
Subject: [PATCH 262/501] Move common error codes to errcode package

Several error codes are generally useful but tied to the v2 specification
definitions. This change moves these error code definitions into the common
package for use by the health package, which is not tied to the v2 API.
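As a sketch of what the move buys, a package such as health can now attach
detail to the shared codes without importing the v2 API definitions (only
the errcode identifiers visible in this diff are assumed; the detail string
is a placeholder):

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// Report a failing dependency with the common UNAVAILABLE code rather
	// than a v2-specific one; WithDetail is the same helper the client
	// code in this patch uses with ErrorCodeUnauthorized.
	err := errcode.ErrorCodeUnavailable.WithDetail("storagedriver_inmemory: health check failed")
	fmt.Println(err)
}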
Signed-off-by: Stephen J Day
---
 docs/api/errcode/register.go     | 46 ++++++++++++++++++++++++++------
 docs/api/v2/descriptors.go       | 20 +++++++-------
 docs/api/v2/errors.go            | 18 -------------
 docs/client/errors.go            |  3 +--
 docs/client/repository_test.go   |  5 ++--
 docs/handlers/app.go             |  4 +--
 docs/handlers/app_test.go        |  4 +--
 docs/handlers/blob.go            |  2 +-
 docs/handlers/images.go          |  2 +-
 docs/proxy/proxymanifeststore.go |  6 ++---
 10 files changed, 60 insertions(+), 50 deletions(-)

diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go
index 42f911b3..e1c93f38 100644
--- a/docs/api/errcode/register.go
+++ b/docs/api/errcode/register.go
@@ -13,15 +13,45 @@ var (
 	groupToDescriptors = map[string][]ErrorDescriptor{}
 )
-// ErrorCodeUnknown is a generic error that can be used as a last
-// resort if there is no situation-specific error message that can be used
-var ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
-	Value:   "UNKNOWN",
-	Message: "unknown error",
-	Description: `Generic error returned when the error does not have an
+var (
+	// ErrorCodeUnknown is a generic error that can be used as a last
+	// resort if there is no situation-specific error message that can be used
+	ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
+		Value:   "UNKNOWN",
+		Message: "unknown error",
+		Description: `Generic error returned when the error does not have an
 	API classification.`,
-	HTTPStatusCode: http.StatusInternalServerError,
-})
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeUnsupported is returned when an operation is not supported.
+	ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
+		Value:   "UNSUPPORTED",
+		Message: "The operation is unsupported.",
+		Description: `The operation was unsupported due to a missing
+		implementation or invalid set of parameters.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeUnauthorized is returned if a request is not authorized.
+	ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
+		Value:   "UNAUTHORIZED",
+		Message: "access to the requested resource is not authorized",
+		Description: `The access controller denied access for the operation on
+		a resource. Often this will be accompanied by a 401 Unauthorized
+		response status.`,
+		HTTPStatusCode: http.StatusUnauthorized,
+	})
+
+	// ErrorCodeUnavailable provides a common error to report unavailability
+	// of a service or endpoint. 
+ ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) +) var nextCode = 1000 var registerLock sync.Mutex diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 0ef64f88..09289b96 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -124,7 +124,7 @@ var ( }, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -145,7 +145,7 @@ var ( }, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -374,7 +374,7 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, }, { @@ -451,7 +451,7 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, }, }, @@ -506,7 +506,7 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, }, }, @@ -568,7 +568,7 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, }, { @@ -645,7 +645,7 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, }, { @@ -682,7 +682,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -737,7 +737,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnauthorized, + errcode.ErrorCodeUnauthorized, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", @@ -974,7 +974,7 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ - ErrorCodeUnsupported, + errcode.ErrorCodeUnsupported, }, }, }, diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index 87e27f2e..ece52a2c 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -9,24 +9,6 @@ import ( const errGroup = "registry.api.v2" var ( - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - }) - - // ErrorCodeUnauthorized is returned if a request is not authorized. - ErrorCodeUnauthorized = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "access to the requested resource is not authorized", - Description: `The access controller denied access for the operation on - a resource. Often this will be accompanied by a 401 Unauthorized - response status.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. 
ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ diff --git a/docs/client/errors.go b/docs/client/errors.go index ebd1c36c..7305c021 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -8,7 +8,6 @@ import ( "net/http" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" ) // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is @@ -52,7 +51,7 @@ func handleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return v2.ErrorCodeUnauthorized.WithDetail(uErr.Response) + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 26201763..8a7a598e 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -21,7 +21,6 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/testutil" ) @@ -782,10 +781,10 @@ func TestManifestUnauthorized(t *testing.T) { if !ok { t.Fatalf("Unexpected error type: %#v", err) } - if v2Err.Code != v2.ErrorCodeUnauthorized { + if v2Err.Code != errcode.ErrorCodeUnauthorized { t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) } - if expected := v2.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { + if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) } } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index ab33e8a6..11d91120 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -575,7 +575,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // base route is accessed. This section prevents us from making // that mistake elsewhere in the code, allowing any operation to // proceed. 
- if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized); err != nil { + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return fmt.Errorf("forbidden: no repository name") @@ -590,7 +590,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // Add the appropriate WWW-Auth header err.SetHeaders(w) - if err := errcode.ServeJSON(w, v2.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { + if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } default: diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 6f597527..3ef2342c 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -205,8 +205,8 @@ func TestNewApp(t *testing.T) { if !ok { t.Fatalf("not an ErrorCoder: %#v", errs[0]) } - if err2.ErrorCode() != v2.ErrorCodeUnauthorized { - t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), v2.ErrorCodeUnauthorized) + if err2.ErrorCode() != errcode.ErrorCodeUnauthorized { + t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized) } } diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index b7c06ea2..fd514ec0 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -81,7 +81,7 @@ func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) case distribution.ErrUnsupported: w.WriteHeader(http.StatusMethodNotAllowed) - bh.Errors = append(bh.Errors, v2.ErrorCodeUnsupported) + bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) default: bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown) } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index dbe7b706..f5354399 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -213,7 +213,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h w.WriteHeader(http.StatusNotFound) return case distribution.ErrUnsupported: - imh.Errors = append(imh.Errors, v2.ErrorCodeUnsupported) + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) w.WriteHeader(http.StatusMethodNotAllowed) default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 5b79c8ce..8921998a 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -7,7 +7,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -147,9 +147,9 @@ func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { } func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error { - return v2.ErrorCodeUnsupported + return errcode.ErrorCodeUnsupported } func (pms proxyManifestStore) Delete(dgst digest.Digest) error { - return v2.ErrorCodeUnsupported + return errcode.ErrorCodeUnsupported } From 43fc9a195d28f7c0c9d9d288c5f018efbd40f984 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 11 Aug 2015 11:00:30 -0700 Subject: [PATCH 263/501] 
Change some incorrect error types in proxy stores from API errors to
distribution errors. Fill in missing checks for mutations on a registry
pull-through cache. Add unit tests and update documentation.

Also, give v2.ErrorCodeUnsupported an HTTP status code; previously it was
defaulting to 500, now it's 405 Method Not Allowed.

Signed-off-by: Richard Scothern 
---
 docs/api/errcode/register.go     |  2 +-
 docs/api/v2/descriptors.go       | 27 ++++++++++++-
 docs/handlers/api_test.go        | 65 ++++++++++++++++++++++++++++++++
 docs/handlers/blob.go            | 13 ++++---
 docs/handlers/blobupload.go      |  9 ++++-
 docs/handlers/images.go          |  8 ++--
 docs/proxy/proxymanifeststore.go |  5 +--
 7 files changed, 114 insertions(+), 15 deletions(-)

diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go
index e1c93f38..f3062ffa 100644
--- a/docs/api/errcode/register.go
+++ b/docs/api/errcode/register.go
@@ -30,7 +30,7 @@ var (
 		Message: "The operation is unsupported.",
 		Description: `The operation was unsupported due to a missing
 	implementation or invalid set of parameters.`,
-		HTTPStatusCode: http.StatusBadRequest,
+		HTTPStatusCode: http.StatusMethodNotAllowed,
 	})
 
 	// ErrorCodeUnauthorized is returned if a request is not authorized.
diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go
index 09289b96..c5630fed 100644
--- a/docs/api/v2/descriptors.go
+++ b/docs/api/v2/descriptors.go
@@ -689,6 +689,14 @@ var routeDescriptors = []RouteDescriptor{
 							Format:      errorsBody,
 						},
 					},
+					{
+						Name:        "Not allowed",
+						Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason",
+						StatusCode:  http.StatusMethodNotAllowed,
+						ErrorCodes: []errcode.ErrorCode{
+							errcode.ErrorCodeUnsupported,
+						},
+					},
 				},
 			},
 		},
@@ -757,6 +765,14 @@ var routeDescriptors = []RouteDescriptor{
 							Format:      errorsBody,
 						},
 					},
+					{
+						Name:        "Not allowed",
+						Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
+						StatusCode:  http.StatusMethodNotAllowed,
+						ErrorCodes: []errcode.ErrorCode{
+							errcode.ErrorCodeUnsupported,
+						},
+					},
 				},
 			},
 		},
@@ -967,7 +983,7 @@ var routeDescriptors = []RouteDescriptor{
 					},
 				},
 				{
-					Description: "Delete is not enabled on the registry",
+					Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled",
 					StatusCode:  http.StatusMethodNotAllowed,
 					Body: BodyDescriptor{
 						ContentType: "application/json; charset=utf-8",
@@ -1051,6 +1067,14 @@ var routeDescriptors = []RouteDescriptor{
 					},
 				},
 				unauthorizedResponsePush,
+				{
+					Name:        "Not allowed",
+					Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
+					StatusCode:  http.StatusMethodNotAllowed,
+					ErrorCodes: []errcode.ErrorCode{
+						errcode.ErrorCodeUnsupported,
+					},
+				},
 			},
 		},
 		{
@@ -1389,6 +1413,7 @@ var routeDescriptors = []RouteDescriptor{
 						ErrorCodeDigestInvalid,
 						ErrorCodeNameInvalid,
 						ErrorCodeBlobUploadInvalid,
+						errcode.ErrorCodeUnsupported,
 					},
 					Body: BodyDescriptor{
 						ContentType: "application/json; charset=utf-8",
diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go
index c484835f..4c700e06 100644
--- a/docs/handlers/api_test.go
+++ b/docs/handlers/api_test.go
@@ -1001,6 +1001,21 @@ type testEnv struct {
 	builder *v2.URLBuilder
 }
 
+func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory":
configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + }, + Proxy: configuration.Proxy{ + RemoteURL: "http://example.com", + }, + } + + return newTestEnvWithConfig(t, &config) + +} + func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { config := configuration.Configuration{ Storage: configuration.Storage{ @@ -1378,3 +1393,53 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) "Docker-Content-Digest": []string{dgst.String()}, }) } + +// Test mutation operations on a registry configured as a cache. Ensure that they return +// appropriate errors. +func TestRegistryAsCacheMutationAPIs(t *testing.T) { + deleteEnabled := true + env := newTestEnvMirror(t, deleteEnabled) + + imageName := "foo/bar" + tag := "latest" + manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + // Manifest upload + unsignedManifest := &manifest.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: imageName, + Tag: tag, + FSLayers: []manifest.FSLayer{}, + } + resp := putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Manifest Delete + resp, err = httpDelete(manifestURL) + checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob upload initialization + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + + // Blob Delete + blobURL, err := env.builder.BuildBlobURL(imageName, digest.DigestSha256EmptyTar) + resp, err = httpDelete(blobURL) + checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) + +} diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index fd514ec0..4a923aa5 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -76,16 +76,17 @@ func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { err := blobs.Delete(bh, bh.Digest) if err != nil { switch err { - case distribution.ErrBlobUnknown: - w.WriteHeader(http.StatusNotFound) - bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) case distribution.ErrUnsupported: - w.WriteHeader(http.StatusMethodNotAllowed) bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) + return + case distribution.ErrBlobUnknown: + bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) + return default: - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown) + bh.Errors = append(bh.Errors, err) + context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) + return } - return } w.Header().Set("Content-Length", "0") diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 1d1c1009..bbb70b59 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -117,8 +117,13 @@ type blobUploadHandler struct { func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { blobs := buh.Repository.Blobs(buh) upload, err 
:= blobs.Create(buh) + if err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + if err == distribution.ErrUnsupported { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } return } @@ -227,6 +232,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) default: switch err { + case distribution.ErrUnsupported: + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) default: diff --git a/docs/handlers/images.go b/docs/handlers/images.go index f5354399..f4f0db89 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -154,6 +154,10 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http if err := manifests.Put(&manifest); err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. + if err == distribution.ErrUnsupported { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) + return + } switch err := err.(type) { case distribution.ErrManifestVerification: for _, verificationError := range err { @@ -210,14 +214,12 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h return case distribution.ErrBlobUnknown: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - w.WriteHeader(http.StatusNotFound) return case distribution.ErrUnsupported: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) - w.WriteHeader(http.StatusMethodNotAllowed) + return default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) - w.WriteHeader(http.StatusBadRequest) return } } diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 8921998a..e314e84f 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -7,7 +7,6 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -147,9 +146,9 @@ func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { } func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error { - return errcode.ErrorCodeUnsupported + return distribution.ErrUnsupported } func (pms proxyManifestStore) Delete(dgst digest.Digest) error { - return errcode.ErrorCodeUnsupported + return distribution.ErrUnsupported } From d9a20377f342308a4f1413b4db0020107009a48f Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 10 Aug 2015 14:20:52 -0700 Subject: [PATCH 264/501] Add a section to the config file for HTTP headers to add to responses The example configuration files add X-Content-Type-Options: nosniff. Add coverage in existing registry/handlers unit tests. 
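Concretely, the new section is exercised the same way the updated tests do:
populate HTTP.Headers on the configuration, and every response served by the
app carries those headers. A minimal sketch, mirroring the test fixtures (the
surrounding wiring is illustrative, not part of this patch):

    config := configuration.Configuration{
        Storage: configuration.Storage{
            "inmemory": configuration.Parameters{},
        },
    }
    // Header names map to lists of values, matching net/http.Header.
    config.HTTP.Headers = http.Header{
        "X-Content-Type-Options": []string{"nosniff"},
    }
    app := NewApp(context.Background(), config)
    // The dispatcher copies these headers onto every ResponseWriter
    // before the request is routed to a handler.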
Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 15 +++++++++++++++ docs/handlers/app.go | 6 ++++++ 2 files changed, 21 insertions(+) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index c484835f..0e192449 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -30,6 +30,10 @@ import ( "golang.org/x/net/context" ) +var headerConfig = http.Header{ + "X-Content-Type-Options": []string{"nosniff"}, +} + // TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified // 200 OK response. func TestCheckAPI(t *testing.T) { @@ -215,6 +219,7 @@ func TestURLPrefix(t *testing.T) { }, } config.HTTP.Prefix = "/test/" + config.HTTP.Headers = headerConfig env := newTestEnvWithConfig(t, &config) @@ -1009,6 +1014,8 @@ func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { }, } + config.HTTP.Headers = headerConfig + return newTestEnvWithConfig(t, &config) } @@ -1225,6 +1232,14 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus t.FailNow() } + + // We expect the headers included in the configuration + if !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { + t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) + maybeDumpResponse(t, resp) + + t.FailNow() + } } // checkBodyHasErrorCodes ensures the body is an error body and has the diff --git a/docs/handlers/app.go b/docs/handlers/app.go index f60290d0..7b0fe6c2 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -428,6 +428,12 @@ type dispatchFunc func(ctx *Context, r *http.Request) http.Handler // handler, using the dispatch factory function. func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for headerName, headerValues := range app.Config.HTTP.Headers { + for _, value := range headerValues { + w.Header().Add(headerName, value) + } + } + context := app.context(w, r) if err := app.authorized(w, r, context); err != nil { From 5dd78c821aab8b1c85e10f6f953642900b02e37e Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 12 Aug 2015 13:07:57 -0700 Subject: [PATCH 265/501] Use correct path for manifest revision path Unfortunately, the refactor used the incorrect path for manifest links within a repository. While this didn't stop the registry from working, it did break compatibility with 2.0 deployments for manifest fetches. Tests were added to ensure these are locked down to the appropriate paths. Signed-off-by: Stephen J Day --- docs/storage/linkedblobstore.go | 6 +++++- docs/storage/manifeststore_test.go | 34 ++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 2ba62a95..d8252e5d 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -11,6 +11,10 @@ import ( "github.com/docker/distribution/uuid" ) +// linkPathFunc describes a function that can resolve a link based on the +// repository name and digest. +type linkPathFunc func(pm *pathMapper, name string, dgst digest.Digest) (string, error) + // linkedBlobStore provides a full BlobService that namespaces the blobs to a // given repository. Effectively, it manages the links in a given repository // that grant access to the global blob store. @@ -297,5 +301,5 @@ func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, erro // manifestRevisionLinkPath provides the path to the manifest revision link. 
 func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
-	return pm.path(layerLinkPathSpec{name: name, digest: dgst})
+	return pm.path(manifestRevisionLinkPathSpec{name: name, revision: dgst})
 }
diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go
index a4ce9149..0bb72fb0 100644
--- a/docs/storage/manifeststore_test.go
+++ b/docs/storage/manifeststore_test.go
@@ -362,3 +362,37 @@ func TestManifestStorage(t *testing.T) {
 		t.Errorf("Unexpected success deleting while disabled")
 	}
 }
+
+// TestLinkPathFuncs ensures that the behavior of the link path functions is
+// locked down and implemented as expected.
+func TestLinkPathFuncs(t *testing.T) {
+	for _, testcase := range []struct {
+		repo       string
+		digest     digest.Digest
+		linkPathFn linkPathFunc
+		expected   string
+	}{
+		{
+			repo:       "foo/bar",
+			digest:     "sha256:deadbeaf",
+			linkPathFn: blobLinkPath,
+			expected:   "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf/link",
+		},
+		{
+			repo:       "foo/bar",
+			digest:     "sha256:deadbeaf",
+			linkPathFn: manifestRevisionLinkPath,
+			expected:   "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link",
+		},
+	} {
+		p, err := testcase.linkPathFn(defaultPathMapper, testcase.repo, testcase.digest)
+		if err != nil {
+			t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err)
+		}
+
+		if p != testcase.expected {
+			t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected)
+		}
+	}
+
+}

From 5878a8f401ef5a4d32cff2d85ef107680bd7ed97 Mon Sep 17 00:00:00 2001
From: Stephen J Day 
Date: Wed, 12 Aug 2015 13:11:13 -0700
Subject: [PATCH 266/501] Maintain manifest link compatibility

Unfortunately, the 2.1 release has written manifest links into the wrong
directory. This doesn't affect new 2.1 deployments, but fixing this to be 2.0
backwards compatible has broken 2.1.0 compatibility. To ensure we have
compatibility between 2.0, 2.1.0 and future releases, we now check one of
several locations to identify a manifest link.

Signed-off-by: Stephen J Day 
---
 docs/storage/linkedblobstore.go | 92 +++++++++++++++++++++++----------
 docs/storage/registry.go        | 23 ++++++---
 docs/storage/signaturestore.go  |  8 +--
 docs/storage/tagstore.go        |  7 ++-
 4 files changed, 88 insertions(+), 42 deletions(-)

diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index d8252e5d..dc670542 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -27,11 +27,13 @@ type linkedBlobStore struct {
 	deleteEnabled          bool
 	resumableDigestEnabled bool
 
-	// linkPath allows one to control the repository blob link set to which
-	// the blob store dispatches. This is required because manifest and layer
-	// blobs have not yet been fully merged. At some point, this functionality
-	// should be removed an the blob links folder should be merged.
-	linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error)
+	// linkPathFns specifies one or more path functions allowing one to
+	// control the repository blob link set to which the blob store
+	// dispatches. This is required because manifest and layer blobs have not
+	// yet been fully merged. At some point, this functionality should be
+	// removed and the blob links folder should be merged. The first entry is
+	// treated as the "canonical" link location and will be used for writes.
+	linkPathFns []linkPathFunc
 }
 
 var _ distribution.BlobStore = &linkedBlobStore{}
@@ -217,13 +219,16 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution
 	// Don't make duplicate links.
 	seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
 
+	// only use the first link
+	linkPathFn := lbs.linkPathFns[0]
+
 	for _, dgst := range dgsts {
 		if _, seen := seenDigests[dgst]; seen {
 			continue
 		}
 		seenDigests[dgst] = struct{}{}
 
-		blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst)
+		blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst)
 		if err != nil {
 			return err
 		}
@@ -240,33 +245,43 @@ type linkedBlobStatter struct {
 	*blobStore
 	repository distribution.Repository
 
-	// linkPath allows one to control the repository blob link set to which
-	// the blob store dispatches. This is required because manifest and layer
-	// blobs have not yet been fully merged. At some point, this functionality
-	// should be removed an the blob links folder should be merged.
-	linkPath func(pm *pathMapper, name string, dgst digest.Digest) (string, error)
+	// linkPathFns specifies one or more path functions allowing one to
+	// control the repository blob link set to which the blob store
+	// dispatches. This is required because manifest and layer blobs have not
+	// yet been fully merged. At some point, this functionality should be
+	// removed and the blob links folder should be merged. The first entry is
+	// treated as the "canonical" link location and will be used for writes.
+	linkPathFns []linkPathFunc
 }
 
 var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
 
 func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
+	var (
+		resolveErr error
+		target     digest.Digest
+	)
+
+	// try the many link path functions until we get success or an error that
+	// is not PathNotFoundError.
+	for _, linkPathFn := range lbs.linkPathFns {
+		var err error
+		target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
+
+		if err == nil {
+			break // success!
+		}
 
-	target, err := lbs.blobStore.readlink(ctx, blobLinkPath)
-	if err != nil {
 		switch err := err.(type) {
 		case driver.PathNotFoundError:
-			return distribution.Descriptor{}, distribution.ErrBlobUnknown
+			resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error
 		default:
 			return distribution.Descriptor{}, err
 		}
+	}
 
-		// TODO(stevvooe): For backwards compatibility with data in "_layers", we
-		// need to hit layerLinkPath, as well. Or, somehow migrate to the new path
-		// layout.
+	if resolveErr != nil {
+		return distribution.Descriptor{}, resolveErr
 	}
 
 	if target != dgst {
@@ -280,13 +295,38 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 	return lbs.blobStore.statter.Stat(ctx, target)
 }
 
-func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
-	blobLinkPath, err := lbs.linkPath(lbs.pm, lbs.repository.Name(), dgst)
-	if err != nil {
-		return err
+func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
+	// clear any possible existence of a link described in linkPathFns
+	for _, linkPathFn := range lbs.linkPathFns {
+		blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst)
+		if err != nil {
+			return err
+		}
+
+		err = lbs.blobStore.driver.Delete(ctx, blobLinkPath)
+		if err != nil {
+			switch err := err.(type) {
+			case driver.PathNotFoundError:
+				continue // just ignore this error and continue
+			default:
+				return err
+			}
+		}
 	}
 
-	return lbs.blobStore.driver.Delete(ctx, blobLinkPath)
+	return nil
+}
+
+// resolveWithLinkFunc allows us to read a link to a resource with different
+// linkPathFuncs to let us try a few different paths before returning not
+// found.
+func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
+	blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst)
+	if err != nil {
+		return "", err
+	}
+
+	return lbs.blobStore.readlink(ctx, blobLinkPath)
 }
 
 func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
diff --git a/docs/storage/registry.go b/docs/storage/registry.go
index c5058b80..b6e0ba4d 100644
--- a/docs/storage/registry.go
+++ b/docs/storage/registry.go
@@ -108,6 +108,13 @@ func (repo *repository) Name() string {
 // may be context sensitive in the future. The instance should be used similar
 // to a request local.
 func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	manifestLinkPathFns := []linkPathFunc{
+		// NOTE(stevvooe): Need to search through multiple locations since
+		// 2.1.0 unintentionally linked into _layers.
+		manifestRevisionLinkPath,
+		blobLinkPath,
+	}
+
 	ms := &manifestStore{
 		ctx:        ctx,
 		repository: repo,
@@ -120,14 +127,14 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
 			repository:    repo,
 			deleteEnabled: repo.registry.deleteEnabled,
 			blobAccessController: &linkedBlobStatter{
-				blobStore:  repo.blobStore,
-				repository: repo,
-				linkPath:   manifestRevisionLinkPath,
+				blobStore:   repo.blobStore,
+				repository:  repo,
+				linkPathFns: manifestLinkPathFns,
 			},
 
 			// TODO(stevvooe): linkPath limits this blob store to only
 			// manifests. This instance cannot be used for blob checks.
-			linkPath: manifestRevisionLinkPath,
+			linkPathFns: manifestLinkPathFns,
 		},
 	},
 	tagStore: &tagStore{
@@ -153,9 +160,9 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
 // to a request local.
 func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
 	var statter distribution.BlobDescriptorService = &linkedBlobStatter{
-		blobStore:  repo.blobStore,
-		repository: repo,
-		linkPath:   blobLinkPath,
+		blobStore:   repo.blobStore,
+		repository:  repo,
+		linkPathFns: []linkPathFunc{blobLinkPath},
 	}
 
 	if repo.descriptorCache != nil {
@@ -171,7 +178,7 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
 
 		// TODO(stevvooe): linkPath limits this blob store to only layers.
 		// This instance cannot be used for manifest checks.
-		linkPath:      blobLinkPath,
+		linkPathFns:   []linkPathFunc{blobLinkPath},
 		deleteEnabled: repo.registry.deleteEnabled,
 	}
 }
diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go
index 78fd2e6c..105d66f3 100644
--- a/docs/storage/signaturestore.go
+++ b/docs/storage/signaturestore.go
@@ -132,10 +132,10 @@ func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Di
 		repository: s.repository,
 		blobStore:  s.blobStore,
 		blobAccessController: &linkedBlobStatter{
-			blobStore:  s.blobStore,
-			repository: s.repository,
-			linkPath:   linkpath,
+			blobStore:   s.blobStore,
+			repository:  s.repository,
+			linkPathFns: []linkPathFunc{linkpath},
 		},
-		linkPath: linkpath,
+		linkPathFns: []linkPathFunc{linkpath},
 	}
 }
diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go
index a74d9b09..a7ca3301 100644
--- a/docs/storage/tagstore.go
+++ b/docs/storage/tagstore.go
@@ -122,7 +122,7 @@ func (ts *tagStore) delete(tag string) error {
 	return ts.blobStore.driver.Delete(ts.ctx, tagPath)
 }
 
-// namedBlobStore returns the namedBlobStore for the named tag, allowing one
+// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
 // to index manifest blobs by tag name. While the tag store doesn't map
 // precisely to the linked blob store, using this ensures the links are
 // managed via the same code path.
@@ -131,13 +131,12 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob
 		blobStore:  ts.blobStore,
 		repository: ts.repository,
 		ctx:        ctx,
-		linkPath: func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
+		linkPathFns: []linkPathFunc{func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
 			return pm.path(manifestTagIndexEntryLinkPathSpec{
 				name:     name,
 				tag:      tag,
 				revision: dgst,
 			})
-		},
+		}},
 	}
-
 }

From 614e8c8277275d01d9f8de950a4569ccc08de284 Mon Sep 17 00:00:00 2001
From: Stephen J Day 
Date: Mon, 17 Aug 2015 18:51:05 -0700
Subject: [PATCH 267/501] Remove pathMapper object

The use of the pathMapper is no longer needed given the way we have organized
the code base. The extra level of indirection has proved unnecessary and
confusing, so we've opted to clean it up. In the future, we may require more
flexibility, but now it is simply not required.
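The mechanical shape of the change is simple: every call site that previously
threaded a *pathMapper instance through path lookups now calls the
package-level function directly, with the same pathSpec arguments. A
before/after sketch (the spec value is illustrative):

    // before: path construction routed through a mapper instance
    bp, err := bs.pm.path(blobDataPathSpec{digest: dgst})

    // after: a package-level function; the root prefix and layout version
    // are fixed constants inside the storage package
    bp, err := pathFor(blobDataPathSpec{digest: dgst})

The on-disk layout itself is unchanged, as the updated expectations in
paths_test.go show.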
Signed-off-by: Stephen J Day --- docs/storage/blobstore.go | 7 ++-- docs/storage/blobwriter.go | 4 +-- docs/storage/blobwriter_resumable.go | 6 ++-- docs/storage/catalog.go | 2 +- docs/storage/catalog_test.go | 2 +- docs/storage/linkedblobstore.go | 24 ++++++------- docs/storage/manifeststore_test.go | 2 +- docs/storage/paths.go | 53 +++++++++++++++------------- docs/storage/paths_test.go | 39 ++++++++++---------- docs/storage/purgeuploads.go | 3 +- docs/storage/purgeuploads_test.go | 8 ++--- docs/storage/registry.go | 2 -- docs/storage/signaturestore.go | 7 ++-- docs/storage/tagstore.go | 19 ++++++---- docs/storage/vacuum.go | 6 ++-- 15 files changed, 93 insertions(+), 91 deletions(-) diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index 724617f8..f6a8ac43 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -13,7 +13,6 @@ import ( // creating and traversing backend links. type blobStore struct { driver driver.StorageDriver - pm *pathMapper statter distribution.BlobStatter } @@ -94,7 +93,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr // path returns the canonical path for the blob identified by digest. The blob // may or may not exist. func (bs *blobStore) path(dgst digest.Digest) (string, error) { - bp, err := bs.pm.path(blobDataPathSpec{ + bp, err := pathFor(blobDataPathSpec{ digest: dgst, }) @@ -140,7 +139,6 @@ func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { type blobStatter struct { driver driver.StorageDriver - pm *pathMapper } var _ distribution.BlobDescriptorService = &blobStatter{} @@ -149,9 +147,10 @@ var _ distribution.BlobDescriptorService = &blobStatter{} // in the main blob store. If this method returns successfully, there is // strong guarantee that the blob exists and is available. func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - path, err := bs.pm.path(blobDataPathSpec{ + path, err := pathFor(blobDataPathSpec{ digest: dgst, }) + if err != nil { return distribution.Descriptor{}, err } diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 2142c37f..e0e7239c 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -266,7 +266,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // identified by dgst. The layer should be validated before commencing the // move. func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { - blobPath, err := bw.blobStore.pm.path(blobDataPathSpec{ + blobPath, err := pathFor(blobDataPathSpec{ digest: desc.Digest, }) @@ -324,7 +324,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // instance. An error will be returned if the clean up cannot proceed. If the // resources are already not present, no error will be returned. func (bw *blobWriter) removeResources(ctx context.Context) error { - dataPath, err := bw.blobStore.pm.path(uploadDataPathSpec{ + dataPath, err := pathFor(uploadDataPathSpec{ name: bw.blobStore.repository.Name(), id: bw.id, }) diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go index a26ac2cc..26d3beab 100644 --- a/docs/storage/blobwriter_resumable.go +++ b/docs/storage/blobwriter_resumable.go @@ -111,12 +111,13 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. 
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { - uploadHashStatePathPrefix, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Name(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, }) + if err != nil { return nil, err } @@ -156,12 +157,13 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { return errResumableDigestNotAvailable } - uploadHashStatePath, err := bw.blobStore.pm.path(uploadHashStatePathSpec{ + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Name(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), }) + if err != nil { return err } diff --git a/docs/storage/catalog.go b/docs/storage/catalog.go index 470894b7..b6768012 100644 --- a/docs/storage/catalog.go +++ b/docs/storage/catalog.go @@ -22,7 +22,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return 0, errors.New("no space in slice") } - root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return 0, err } diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go index 1a1dbac5..ed96f50c 100644 --- a/docs/storage/catalog_test.go +++ b/docs/storage/catalog_test.go @@ -23,7 +23,7 @@ func setupFS(t *testing.T) *setupEnv { c := []byte("") ctx := context.Background() registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) - rootpath, _ := defaultPathMapper.path(repositoriesRootPathSpec{}) + rootpath, _ := pathFor(repositoriesRootPathSpec{}) repos := []string{ "/foo/a/_layers/1", diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index dc670542..f01088ba 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -13,7 +13,7 @@ import ( // linkPathFunc describes a function that can resolve a link based on the // repository name and digest. -type linkPathFunc func(pm *pathMapper, name string, dgst digest.Digest) (string, error) +type linkPathFunc func(name string, dgst digest.Digest) (string, error) // linkedBlobStore provides a full BlobService that namespaces the blobs to a // given repository. 
Effectively, it manages the links in a given repository @@ -104,7 +104,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter uuid := uuid.Generate().String() startedAt := time.Now().UTC() - path, err := lbs.blobStore.pm.path(uploadDataPathSpec{ + path, err := pathFor(uploadDataPathSpec{ name: lbs.repository.Name(), id: uuid, }) @@ -113,7 +113,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter return nil, err } - startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ name: lbs.repository.Name(), id: uuid, }) @@ -133,7 +133,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") - startedAtPath, err := lbs.blobStore.pm.path(uploadStartedAtPathSpec{ + startedAtPath, err := pathFor(uploadStartedAtPathSpec{ name: lbs.repository.Name(), id: id, }) @@ -157,7 +157,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution return nil, err } - path, err := lbs.pm.path(uploadDataPathSpec{ + path, err := pathFor(uploadDataPathSpec{ name: lbs.repository.Name(), id: id, }) @@ -228,7 +228,7 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution } seenDigests[dgst] = struct{}{} - blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) if err != nil { return err } @@ -298,7 +298,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { // clear any possible existence of a link described in linkPathFns for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) if err != nil { return err } @@ -321,7 +321,7 @@ func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (er // linkPathFuncs to let us try a few different paths before returning not // found. func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.pm, lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) if err != nil { return "", err } @@ -335,11 +335,11 @@ func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Dig } // blobLinkPath provides the path to the blob link, also known as layers. -func blobLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(layerLinkPathSpec{name: name, digest: dgst}) +func blobLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(layerLinkPathSpec{name: name, digest: dgst}) } // manifestRevisionLinkPath provides the path to the manifest revision link. 
-func manifestRevisionLinkPath(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(manifestRevisionLinkPathSpec{name: name, revision: dgst}) +func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) } diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 0bb72fb0..4ad74820 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -385,7 +385,7 @@ func TestLinkPathFuncs(t *testing.T) { expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link", }, } { - p, err := testcase.linkPathFn(defaultPathMapper, testcase.repo, testcase.digest) + p, err := testcase.linkPathFn(testcase.repo, testcase.digest) if err != nil { t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err) } diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 35debddf..e90a1993 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -8,10 +8,18 @@ import ( "github.com/docker/distribution/digest" ) -const storagePathVersion = "v2" +const ( + storagePathVersion = "v2" // fixed storage layout version + storagePathRoot = "/docker/registry/" // all driver paths have a prefix -// pathMapper maps paths based on "object names" and their ids. The "object -// names" mapped by pathMapper are internal to the storage system. + // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though + // storage path root would configurable for all drivers through this + // package. In reality, we've found it simpler to do this on a per driver + // basis. +) + +// pathFor maps paths based on "object names" and their ids. The "object +// names" mapped by are internal to the storage system. // // The path layout in the storage backend is roughly as follows: // @@ -37,7 +45,7 @@ const storagePathVersion = "v2" // -> blob/ // // -// The storage backend layout is broken up into a content- addressable blob +// The storage backend layout is broken up into a content-addressable blob // store and repositories. The content-addressable blob store holds most data // throughout the backend, keyed by algorithm and digests of the underlying // content. Access to the blob store is controled through links from the @@ -98,18 +106,7 @@ const storagePathVersion = "v2" // // For more information on the semantic meaning of each path and their // contents, please see the path spec documentation. -type pathMapper struct { - root string - version string // should be a constant? -} - -var defaultPathMapper = &pathMapper{ - root: "/docker/registry/", - version: storagePathVersion, -} - -// path returns the path identified by spec. -func (pm *pathMapper) path(spec pathSpec) (string, error) { +func pathFor(spec pathSpec) (string, error) { // Switch on the path object type and return the appropriate path. At // first glance, one may wonder why we don't use an interface to @@ -123,7 +120,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { // to an intermediate path object, than can be consumed and mapped by the // other version. 
- rootPrefix := []string{pm.root, pm.version} + rootPrefix := []string{storagePathRoot, storagePathVersion} repoPrefix := append(rootPrefix, "repositories") switch v := spec.(type) { @@ -136,7 +133,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil case manifestRevisionLinkPathSpec: - root, err := pm.path(manifestRevisionPathSpec{ + root, err := pathFor(manifestRevisionPathSpec{ name: v.name, revision: v.revision, }) @@ -147,7 +144,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(root, "link"), nil case manifestSignaturesPathSpec: - root, err := pm.path(manifestRevisionPathSpec{ + root, err := pathFor(manifestRevisionPathSpec{ name: v.name, revision: v.revision, }) @@ -158,10 +155,11 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return path.Join(root, "signatures"), nil case manifestSignatureLinkPathSpec: - root, err := pm.path(manifestSignaturesPathSpec{ + root, err := pathFor(manifestSignaturesPathSpec{ name: v.name, revision: v.revision, }) + if err != nil { return "", err } @@ -175,50 +173,55 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { case manifestTagsPathSpec: return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil case manifestTagPathSpec: - root, err := pm.path(manifestTagsPathSpec{ + root, err := pathFor(manifestTagsPathSpec{ name: v.name, }) + if err != nil { return "", err } return path.Join(root, v.tag), nil case manifestTagCurrentPathSpec: - root, err := pm.path(manifestTagPathSpec{ + root, err := pathFor(manifestTagPathSpec{ name: v.name, tag: v.tag, }) + if err != nil { return "", err } return path.Join(root, "current", "link"), nil case manifestTagIndexPathSpec: - root, err := pm.path(manifestTagPathSpec{ + root, err := pathFor(manifestTagPathSpec{ name: v.name, tag: v.tag, }) + if err != nil { return "", err } return path.Join(root, "index"), nil case manifestTagIndexEntryLinkPathSpec: - root, err := pm.path(manifestTagIndexEntryPathSpec{ + root, err := pathFor(manifestTagIndexEntryPathSpec{ name: v.name, tag: v.tag, revision: v.revision, }) + if err != nil { return "", err } return path.Join(root, "link"), nil case manifestTagIndexEntryPathSpec: - root, err := pm.path(manifestTagIndexPathSpec{ + root, err := pathFor(manifestTagIndexPathSpec{ name: v.name, tag: v.tag, }) + if err != nil { return "", err } diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go index 3d17b377..9e91a3fa 100644 --- a/docs/storage/paths_test.go +++ b/docs/storage/paths_test.go @@ -7,10 +7,6 @@ import ( ) func TestPathMapper(t *testing.T) { - pm := &pathMapper{ - root: "/pathmapper-test", - } - for _, testcase := range []struct { spec pathSpec expected string @@ -21,14 +17,14 @@ func TestPathMapper(t *testing.T) { name: "foo/bar", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", }, { spec: manifestRevisionLinkPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", }, { spec: manifestSignatureLinkPathSpec{ @@ -36,41 +32,41 @@ func TestPathMapper(t *testing.T) { revision: 
"sha256:abcdef0123456789", signature: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", }, { spec: manifestSignaturesPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", }, { spec: manifestTagsPathSpec{ name: "foo/bar", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", }, { spec: manifestTagPathSpec{ name: "foo/bar", tag: "thetag", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", }, { spec: manifestTagCurrentPathSpec{ name: "foo/bar", tag: "thetag", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/current/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", }, { spec: manifestTagIndexPathSpec{ name: "foo/bar", tag: "thetag", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", }, { spec: manifestTagIndexEntryPathSpec{ @@ -78,7 +74,7 @@ func TestPathMapper(t *testing.T) { tag: "thetag", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", }, { spec: manifestTagIndexEntryLinkPathSpec{ @@ -86,26 +82,26 @@ func TestPathMapper(t *testing.T) { tag: "thetag", revision: "sha256:abcdef0123456789", }, - expected: "/pathmapper-test/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", }, { spec: layerLinkPathSpec{ name: "foo/bar", digest: "tarsum.v1+test:abcdef", }, - expected: "/pathmapper-test/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", + expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", }, { spec: blobDataPathSpec{ digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), }, - expected: "/pathmapper-test/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", + expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", }, { spec: blobDataPathSpec{ digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), }, - expected: "/pathmapper-test/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", + expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", }, { @@ -113,17 +109,17 @@ func TestPathMapper(t *testing.T) { name: "foo/bar", id: "asdf-asdf-asdf-adsf", }, - expected: "/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", }, { spec: uploadStartedAtPathSpec{ name: "foo/bar", id: "asdf-asdf-asdf-adsf", }, - expected: 
"/pathmapper-test/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", + expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", }, } { - p, err := pm.path(testcase.spec) + p, err := pathFor(testcase.spec) if err != nil { t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) } @@ -136,9 +132,10 @@ func TestPathMapper(t *testing.T) { // Add a few test cases to ensure we cover some errors // Specify a path that requires a revision and get a digest validation error. - badpath, err := pm.path(manifestSignaturesPathSpec{ + badpath, err := pathFor(manifestSignaturesPathSpec{ name: "foo/bar", }) + if err == nil { t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) } diff --git a/docs/storage/purgeuploads.go b/docs/storage/purgeuploads.go index c66f8881..7576b189 100644 --- a/docs/storage/purgeuploads.go +++ b/docs/storage/purgeuploads.go @@ -62,10 +62,11 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv uploads := make(map[string]uploadData, 0) inUploadDir := false - root, err := defaultPathMapper.path(repositoriesRootPathSpec{}) + root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return uploads, append(errors, err) } + err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) diff --git a/docs/storage/purgeuploads_test.go b/docs/storage/purgeuploads_test.go index 18c98af8..3b70f723 100644 --- a/docs/storage/purgeuploads_test.go +++ b/docs/storage/purgeuploads_test.go @@ -12,8 +12,6 @@ import ( "github.com/docker/distribution/uuid" ) -var pm = defaultPathMapper - func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { d := inmemory.New() ctx := context.Background() @@ -24,7 +22,7 @@ func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time. } func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { - dataPath, err := pm.path(uploadDataPathSpec{name: repo, id: uploadID}) + dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } @@ -32,7 +30,7 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa t.Fatalf("Unable to write data file") } - startedAtPath, err := pm.path(uploadStartedAtPathSpec{name: repo, id: uploadID}) + startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } @@ -115,7 +113,7 @@ func TestPurgeOnlyUploads(t *testing.T) { // Create a directory tree outside _uploads and ensure // these files aren't deleted. - dataPath, err := pm.path(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) + dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) if err != nil { t.Fatalf(err.Error()) } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index b6e0ba4d..da95054e 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -30,7 +30,6 @@ func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriv // create global statter, with cache. 
 	var statter distribution.BlobDescriptorService = &blobStatter{
 		driver: driver,
-		pm:     defaultPathMapper,
 	}
 
 	if blobDescriptorCacheProvider != nil {
@@ -39,7 +38,6 @@ func NewRegistryWithDriv
 
 	bs := &blobStore{
 		driver:  driver,
-		pm:      defaultPathMapper,
 		statter: statter,
 	}
 
diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go
index 105d66f3..f5888f64 100644
--- a/docs/storage/signaturestore.go
+++ b/docs/storage/signaturestore.go
@@ -26,7 +26,7 @@ func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobSto
 var _ distribution.SignatureService = &signatureStore{}
 
 func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) {
-	signaturesPath, err := s.blobStore.pm.path(manifestSignaturesPathSpec{
+	signaturesPath, err := pathFor(manifestSignaturesPathSpec{
 		name:     s.repository.Name(),
 		revision: dgst,
 	})
@@ -119,12 +119,13 @@ func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error {
 // manifest with the given digest. Effectively, each signature link path
 // layout is a unique linked blob store.
 func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore {
-	linkpath := func(pm *pathMapper, name string, dgst digest.Digest) (string, error) {
-		return pm.path(manifestSignatureLinkPathSpec{
+	linkpath := func(name string, dgst digest.Digest) (string, error) {
+		return pathFor(manifestSignatureLinkPathSpec{
 			name:      name,
 			revision:  revision,
 			signature: dgst,
 		})
+
 	}
 
 	return &linkedBlobStore{
diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go
index a7ca3301..aec95286 100644
--- a/docs/storage/tagstore.go
+++ b/docs/storage/tagstore.go
@@ -18,9 +18,10 @@ type tagStore struct {
 
 // tags lists the manifest tags for the specified repository.
 func (ts *tagStore) tags() ([]string, error) {
-	p, err := ts.blobStore.pm.path(manifestTagPathSpec{
+	p, err := pathFor(manifestTagPathSpec{
 		name: ts.repository.Name(),
 	})
+
 	if err != nil {
 		return nil, err
 	}
@@ -47,10 +48,11 @@ func (ts *tagStore) tags() ([]string, error) {
 
 // exists returns true if the specified manifest tag exists in the repository.
 func (ts *tagStore) exists(tag string) (bool, error) {
-	tagPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+	tagPath, err := pathFor(manifestTagCurrentPathSpec{
 		name: ts.repository.Name(),
 		tag:  tag,
 	})
+
 	if err != nil {
 		return false, err
 	}
@@ -66,7 +68,7 @@ func (ts *tagStore) exists(tag string) (bool, error) {
 // tag tags the digest with the given tag, updating the store to point at
 // the current tag. The digest must point to a manifest.
 func (ts *tagStore) tag(tag string, revision digest.Digest) error {
-	currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+	currentPath, err := pathFor(manifestTagCurrentPathSpec{
 		name: ts.repository.Name(),
 		tag:  tag,
 	})
@@ -87,10 +89,11 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error {
 
 // resolve the current revision for name and tag.
 func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
-	currentPath, err := ts.blobStore.pm.path(manifestTagCurrentPathSpec{
+	currentPath, err := pathFor(manifestTagCurrentPathSpec{
 		name: ts.repository.Name(),
 		tag:  tag,
 	})
+
 	if err != nil {
 		return "", err
 	}
@@ -111,10 +114,11 @@ func (ts *tagStore) resolve(tag string) (digest.Digest, error) {
 
 // delete removes the tag from the repository, including the history of all
 // revisions that have the specified tag.
func (ts *tagStore) delete(tag string) error { - tagPath, err := ts.blobStore.pm.path(manifestTagPathSpec{ + tagPath, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name(), tag: tag, }) + if err != nil { return err } @@ -131,12 +135,13 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob blobStore: ts.blobStore, repository: ts.repository, ctx: ctx, - linkPathFns: []linkPathFunc{func(pm *pathMapper, name string, dgst digest.Digest) (string, error) { - return pm.path(manifestTagIndexEntryLinkPathSpec{ + linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ name: name, tag: tag, revision: dgst, }) + }}, } } diff --git a/docs/storage/vacuum.go b/docs/storage/vacuum.go index 46b8096b..60d5a2fa 100644 --- a/docs/storage/vacuum.go +++ b/docs/storage/vacuum.go @@ -18,13 +18,11 @@ func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { return Vacuum{ ctx: ctx, driver: driver, - pm: defaultPathMapper, } } // Vacuum removes content from the filesystem type Vacuum struct { - pm *pathMapper driver driver.StorageDriver ctx context.Context } @@ -36,7 +34,7 @@ func (v Vacuum) RemoveBlob(dgst string) error { return err } - blobPath, err := v.pm.path(blobDataPathSpec{digest: d}) + blobPath, err := pathFor(blobDataPathSpec{digest: d}) if err != nil { return err } @@ -52,7 +50,7 @@ func (v Vacuum) RemoveBlob(dgst string) error { // RemoveRepository removes a repository directory from the // filesystem func (v Vacuum) RemoveRepository(repoName string) error { - rootForRepository, err := v.pm.path(repositoriesRootPathSpec{}) + rootForRepository, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return err } From 2e4c643419b151680d24d8f7db58682f0e621c95 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 18 Aug 2015 13:33:26 -0700 Subject: [PATCH 268/501] Fix tests after #846 Change checkResponse to only expect the configured X-Content-Type-Options header if it doesn't receive a 405 error, which means the handler isn't registered for that method. Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index f3f40aac..99168220 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1248,8 +1248,10 @@ func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus t.FailNow() } - // We expect the headers included in the configuration - if !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { + // We expect the headers included in the configuration, unless the + // status code is 405 (Method Not Allowed), which means the handler + // doesn't even get called. + if resp.StatusCode != 405 && !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) maybeDumpResponse(t, resp) From 11133181fce484fee59479785df6ad5b2531411a Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 18 Aug 2015 15:40:14 -0700 Subject: [PATCH 269/501] Fix CloseNotifier handling and avoid "the ResponseWriter does not implement CloseNotifier" warnings in logs A change in #763 to address review comments caused problems. Originally, instrumentedResponseWriter implemented the CloseNotifier interface, and would panic if it was wrapping something that did not implement that interface. 
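For illustration, the original arrangement looked roughly like this (a
minimal sketch under assumed names; not the literal code):

    type instrumentedResponseWriter struct {
        http.ResponseWriter // the wrapped writer
        // ... instrumentation state ...
    }

    // CloseNotify forwarded to the wrapped writer through a type
    // assertion, so wrapping a writer that was not itself a
    // CloseNotifier panicked here.
    func (irw *instrumentedResponseWriter) CloseNotify() <-chan bool {
        return irw.ResponseWriter.(http.CloseNotifier).CloseNotify()
    }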
This was split into a separate instrumentedResponseWriterCN type that implements CloseNotifier, so there's a fallback if instrumentedResponseWriter ever needs to wrap something that does not implement this interface. instrumentedResponseWriter's Value method would end up upcasting either type back to instrumentedResponseWriter, which does not implement the interface. In effect, instrumentedResponseWriterCN was never visible to the handler. This fixes the problem by implementing a wrapper Value method for instrumentedResponseWriterCN. Signed-off-by: Aaron Lehmann --- docs/handlers/helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index a4f3abcc..5a3c9984 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -29,7 +29,7 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr if notifier, ok := responseWriter.(http.CloseNotifier); ok { clientClosed = notifier.CloseNotify() } else { - ctxu.GetLogger(context).Warn("the ResponseWriter does not implement CloseNotifier") + ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) } // Read in the data, if any. From 7fb68446cc565d532fe8b7f44242d99a50e61f8e Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 18 Aug 2015 10:56:27 -0700 Subject: [PATCH 270/501] Functional options for NewRegistryWithDriver Clean up calling convention for NewRegistryWithDriver to use functional arguments. This is a first step towards the refactor described in #215. I plan to add additional options in the process of moving configurable items from the App structure to the registry structure. Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 39 +++++++++--- docs/handlers/app_test.go | 6 +- docs/proxy/proxyblobstore_test.go | 10 ++- docs/proxy/proxymanifeststore_test.go | 10 ++- docs/storage/blob_test.go | 20 ++++-- docs/storage/catalog_test.go | 5 +- docs/storage/manifeststore_test.go | 10 ++- docs/storage/registry.go | 91 ++++++++++++++++++++------- 8 files changed, 145 insertions(+), 46 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index c2b392d1..7d1f1cf5 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -118,13 +118,18 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App app.configureRedis(&configuration) app.configureLogHook(&configuration) + options := []storage.RegistryOption{} + + if app.isCache { + options = append(options, storage.DisableDigestResumption) + } + // configure deletion - var deleteEnabled bool if d, ok := configuration.Storage["delete"]; ok { e, ok := d["enabled"] if ok { - if deleteEnabled, ok = e.(bool); !ok { - deleteEnabled = false + if deleteEnabled, ok := e.(bool); ok && deleteEnabled { + options = append(options, storage.EnableDelete) } } } @@ -139,10 +144,11 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App default: panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) } - - if redirectDisabled { - ctxu.GetLogger(app).Infof("backend redirection disabled") - } + } + if redirectDisabled { + ctxu.GetLogger(app).Infof("backend redirection disabled") + } else { + options = append(options, storage.EnableRedirect) } // configure storage caches @@ -158,10 +164,20 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } - 
app.registry = storage.NewRegistryWithDriver(app, app.driver, rediscache.NewRedisBlobDescriptorCacheProvider(app.redis), deleteEnabled, !redirectDisabled, app.isCache) + cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": - app.registry = storage.NewRegistryWithDriver(app, app.driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), deleteEnabled, !redirectDisabled, app.isCache) + cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() + localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) + app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) + if err != nil { + panic("could not create registry: " + err.Error()) + } ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { @@ -172,7 +188,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App if app.registry == nil { // configure the registry if no cache section is available. - app.registry = storage.NewRegistryWithDriver(app.Context, app.driver, nil, deleteEnabled, !redirectDisabled, app.isCache) + app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) + if err != nil { + panic("could not create registry: " + err.Error()) + } } app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 3ef2342c..fbb0b188 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -26,12 +26,16 @@ import ( func TestAppDispatcher(t *testing.T) { driver := inmemory.New() ctx := context.Background() + registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } app := &App{ Config: configuration.Configuration{}, Context: ctx, router: v2.Router(), driver: driver, - registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true, true, false), + registry: registry, } server := httptest.NewServer(app) router := v2.Router() diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index 65d5f922..f8845ed3 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -80,13 +80,19 @@ func (te testEnv) RemoteStats() *map[string]int { func makeTestEnv(t *testing.T, name string) testEnv { ctx := context.Background() - localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true) + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } localRepo, err := localRegistry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), 
memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false) + truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } truthRepo, err := truthRegistry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 7b9b8091..9d5f3f66 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -73,7 +73,10 @@ func (sm statsManifest) Tags() ([]string, error) { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() - truthRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, false, false) + truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } truthRepo, err := truthRegistry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -92,7 +95,10 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE t.Fatalf(err.Error()) } - localRegistry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, true) + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } localRepo, err := localRegistry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index e5cfa83e..c84c7432 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -33,7 +33,10 @@ func TestSimpleBlobUpload(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -193,7 +196,10 @@ func TestSimpleBlobUpload(t *testing.T) { } // Reuse state to test delete with a delete-disabled registry - registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) + registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repository, err = registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -212,7 +218,10 @@ func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := 
NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) @@ -316,7 +325,10 @@ func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/catalog_test.go b/docs/storage/catalog_test.go index ed96f50c..eb062c5b 100644 --- a/docs/storage/catalog_test.go +++ b/docs/storage/catalog_test.go @@ -22,7 +22,10 @@ func setupFS(t *testing.T) *setupEnv { d := inmemory.New() c := []byte("") ctx := context.Background() - registry := NewRegistryWithDriver(ctx, d, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } rootpath, _ := pathFor(repositoriesRootPathSpec{}) repos := []string{ diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 4ad74820..7665c5c8 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -29,7 +29,10 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true, true, false) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repo, err := registry.Repository(ctx, name) if err != nil { @@ -348,7 +351,10 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest get returned non-nil") } - r := NewRegistryWithDriver(ctx, env.driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false, true, false) + r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } repo, err := r.Repository(ctx, env.name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index da95054e..0b38ea9b 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -12,28 +12,65 @@ import ( // package. All instances should descend from this object. type registry struct { blobStore *blobStore - blobServer distribution.BlobServer - statter distribution.BlobStatter // global statter service. + blobServer *blobServer + statter *blobStatter // global statter service. 
blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool resumableDigestEnabled bool } -// NewRegistryWithDriver creates a new registry instance from the provided -// driver. The resulting registry may be shared by multiple goroutines but is -// cheap to allocate. If redirect is true, the backend blob server will -// attempt to use (StorageDriver).URLFor to serve all blobs. -// -// TODO(stevvooe): This function signature is getting very out of hand. Move to -// functional options for instance configuration. -func NewRegistryWithDriver(ctx context.Context, driver storagedriver.StorageDriver, blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider, deleteEnabled bool, redirect bool, isCache bool) distribution.Namespace { - // create global statter, with cache. - var statter distribution.BlobDescriptorService = &blobStatter{ - driver: driver, - } +// RegistryOption is the type used for functional options for NewRegistry. +type RegistryOption func(*registry) error - if blobDescriptorCacheProvider != nil { - statter = cache.NewCachedBlobStatter(blobDescriptorCacheProvider, statter) +// EnableRedirect is a functional option for NewRegistry. It causes the backend +// blob server to attempt using (StorageDriver).URLFor to serve all blobs. +func EnableRedirect(registry *registry) error { + registry.blobServer.redirect = true + return nil +} + +// EnableDelete is a functional option for NewRegistry. It enables deletion on +// the registry. +func EnableDelete(registry *registry) error { + registry.deleteEnabled = true + return nil +} + +// DisableDigestResumption is a functional option for NewRegistry. It should be +// used if the registry is acting as a caching proxy. +func DisableDigestResumption(registry *registry) error { + registry.resumableDigestEnabled = false + return nil +} + +// BlobDescriptorCacheProvider returns a functional option for +// NewRegistry. It creates a cached blob statter for use by the +// registry. +func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { + // TODO(aaronl): The duplication of statter across several objects is + // ugly, and prevents us from using interface types in the registry + // struct. Ideally, blobStore and blobServer should be lazily + // initialized, and use the current value of + // blobDescriptorCacheProvider. + return func(registry *registry) error { + if blobDescriptorCacheProvider != nil { + statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) + registry.blobStore.statter = statter + registry.blobServer.statter = statter + registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider + } + return nil + } +} + +// NewRegistry creates a new registry instance from the provided driver. The +// resulting registry may be shared by multiple goroutines but is cheap to +// allocate. If the Redirect option is specified, the backend blob server will +// attempt to use (StorageDriver).URLFor to serve all blobs. 
+func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) {
+	// create global statter
+	statter := &blobStatter{
+		driver: driver,
 	}
 
 	bs := &blobStore{
@@ -41,18 +78,24 @@ func NewRegistryWithDriv
 		statter: statter,
 	}
 
-	return &registry{
+	registry := &registry{
 		blobStore: bs,
 		blobServer: &blobServer{
-			driver:   driver,
-			statter:  statter,
-			pathFn:   bs.path,
-			redirect: redirect,
+			driver:  driver,
+			statter: statter,
+			pathFn:  bs.path,
 		},
-		blobDescriptorCacheProvider: blobDescriptorCacheProvider,
-		deleteEnabled:               deleteEnabled,
-		resumableDigestEnabled:      !isCache,
+		statter:                statter,
+		resumableDigestEnabled: true,
 	}
+
+	for _, option := range options {
+		if err := option(registry); err != nil {
+			return nil, err
+		}
+	}
+
+	return registry, nil
 }
 
 // Scope returns the namespace scope for a registry. The registry
From 142b68aaa2c27215b3fdc29a17ed77112bd415e7 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Wed, 19 Aug 2015 11:37:53 -0700
Subject: [PATCH 271/501] Add a unit test which verifies that the ResponseWriter endpoints see implements CloseNotifier

Signed-off-by: Aaron Lehmann
---
 docs/handlers/api_test.go | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go
index 99168220..e351cb95 100644
--- a/docs/handlers/api_test.go
+++ b/docs/handlers/api_test.go
@@ -1460,3 +1460,31 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
 
 	checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
 }
+
+// TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter
+// that implements http.CloseNotifier.
+func TestCheckContextNotifier(t *testing.T) {
+	env := newTestEnv(t, false)
+
+	// Register a new endpoint for testing
+	env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler {
+		return handlers.MethodHandler{
+			"GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				if _, ok := w.(http.CloseNotifier); !ok {
+					t.Fatal("could not cast ResponseWriter to CloseNotifier")
+				}
+				w.WriteHeader(200)
+			}),
+		}
+	}))
+
+	resp, err := http.Get(env.server.URL + "/unittest/reponame/")
+	if err != nil {
+		t.Fatalf("unexpected error issuing request: %v", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode)
+	}
+}
From c48e460933d15050ff502ba53624aa68f74b7873 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Tue, 18 Aug 2015 17:19:46 -0700
Subject: [PATCH 272/501] Add configurable file-existence and HTTP health checks

Add a section to the config file called "health". Within this section,
"filecheckers" and "httpcheckers" list checks to run. Each check specifies
a file or URI, a time interval for the check, and a threshold specifying
how many times the check must fail to reach an unhealthy state.

Document the new options in docs/configuration.md.

Add unit testing for both types of checkers. Add an UnregisterAll function
in the health package to support the unit tests, and an Unregister
function for consistency with Register.

Fix a string conversion problem in the health package's HTTP checker.
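For reference, the wiring this adds to the app reduces to calls like the
following (a simplified sketch with made-up paths and URIs; see the
app.go hunk below for the real code):

    // A file checker reports unhealthy for as long as the named file
    // exists, which is useful for taking a host out of rotation.
    health.Register("/etc/registry-down",
        health.PeriodicChecker(checks.FileChecker("/etc/registry-down"), 10*time.Second))

    // An HTTP checker reports unhealthy once the URI has failed more
    // consecutive probes than the threshold allows.
    health.Register("http://backend/health",
        health.PeriodicThresholdChecker(checks.HTTPChecker("http://backend/health"), 10*time.Second, 3))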
Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 34 +++++- docs/handlers/health_test.go | 200 +++++++++++++++++++++++++++++++++++ 2 files changed, 233 insertions(+), 1 deletion(-) create mode 100644 docs/handlers/health_test.go diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 7d1f1cf5..8b8543dd 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -15,6 +15,7 @@ import ( "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/health" + "github.com/docker/distribution/health/checks" "github.com/docker/distribution/notifications" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" @@ -37,6 +38,9 @@ import ( // was specified. const randomSecretSize = 32 +// defaultCheckInterval is the default time in between health checks +const defaultCheckInterval = 10 * time.Second + // App is a global registry application object. Shared resources can be placed // on this object that will be accessible from all requests. Any writable // fields should be protected. @@ -231,10 +235,38 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // implementing this properly will require a refactor. This method may panic // if called twice in the same process. func (app *App) RegisterHealthChecks() { - health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), 10*time.Second, 3, func() error { + health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), defaultCheckInterval, 3, func() error { _, err := app.driver.List(app, "/") // "/" should always exist return err // any error will be treated as failure }) + + for _, fileChecker := range app.Config.Health.FileCheckers { + interval := fileChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + if fileChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d, threshold=%d", fileChecker.File, interval/time.Second, fileChecker.Threshold) + health.Register(fileChecker.File, health.PeriodicThresholdChecker(checks.FileChecker(fileChecker.File), interval, fileChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + health.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) + } + } + + for _, httpChecker := range app.Config.Health.HTTPCheckers { + interval := httpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + if httpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) + health.Register(httpChecker.URI, health.PeriodicThresholdChecker(checks.HTTPChecker(httpChecker.URI), interval, httpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) + health.Register(httpChecker.URI, health.PeriodicChecker(checks.HTTPChecker(httpChecker.URI), interval)) + } + } } // register a handler with the application, by route name. 
The handler will be diff --git a/docs/handlers/health_test.go b/docs/handlers/health_test.go new file mode 100644 index 00000000..ce5860a8 --- /dev/null +++ b/docs/handlers/health_test.go @@ -0,0 +1,200 @@ +package handlers + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/health" + "golang.org/x/net/context" +) + +func TestFileHealthCheck(t *testing.T) { + // In case other tests registered checks before this one + health.UnregisterAll() + + interval := time.Second + + tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") + if err != nil { + t.Fatalf("could not create temporary file: %v", err) + } + defer tmpfile.Close() + + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + Health: configuration.Health{ + FileCheckers: []configuration.FileChecker{ + { + Interval: interval, + File: tmpfile.Name(), + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + app.RegisterHealthChecks() + + debugServer := httptest.NewServer(nil) + + // Wait for health check to happen + <-time.After(2 * interval) + + resp, err := http.Get(debugServer.URL + "/debug/health") + if err != nil { + t.Fatalf("error performing HTTP GET: %v", err) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading HTTP body: %v", err) + } + resp.Body.Close() + var decoded map[string]string + err = json.Unmarshal(body, &decoded) + if err != nil { + t.Fatalf("error unmarshaling json: %v", err) + } + if len(decoded) != 1 { + t.Fatal("expected 1 item in returned json") + } + if decoded[tmpfile.Name()] != "file exists" { + t.Fatal(`did not get "file exists" result for health check`) + } + + os.Remove(tmpfile.Name()) + + <-time.After(2 * interval) + resp, err = http.Get(debugServer.URL + "/debug/health") + if err != nil { + t.Fatalf("error performing HTTP GET: %v", err) + } + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading HTTP body: %v", err) + } + resp.Body.Close() + var decoded2 map[string]string + err = json.Unmarshal(body, &decoded2) + if err != nil { + t.Fatalf("error unmarshaling json: %v", err) + } + if len(decoded2) != 0 { + t.Fatal("expected 0 items in returned json") + } +} + +func TestHTTPHealthCheck(t *testing.T) { + // In case other tests registered checks before this one + health.UnregisterAll() + + interval := time.Second + threshold := 3 + + stopFailing := make(chan struct{}) + + checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Fatalf("expected HEAD request, got %s", r.Method) + } + select { + case <-stopFailing: + w.WriteHeader(http.StatusOK) + default: + w.WriteHeader(http.StatusInternalServerError) + } + })) + + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + Health: configuration.Health{ + HTTPCheckers: []configuration.HTTPChecker{ + { + Interval: interval, + URI: checkedServer.URL, + Threshold: threshold, + }, + }, + }, + } + + ctx := context.Background() + + app := NewApp(ctx, config) + app.RegisterHealthChecks() + + debugServer := httptest.NewServer(nil) + + for i := 0; ; i++ { + <-time.After(interval) + + resp, err := http.Get(debugServer.URL + "/debug/health") + if err != nil { + t.Fatalf("error performing HTTP GET: %v", err) + } + body, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading HTTP body: %v", err) + } + resp.Body.Close() + var decoded map[string]string + err = json.Unmarshal(body, &decoded) + if err != nil { + t.Fatalf("error unmarshaling json: %v", err) + } + + if i < threshold-1 { + // definitely shouldn't have hit the threshold yet + if len(decoded) != 0 { + t.Fatal("expected 1 items in returned json") + } + continue + } + if i < threshold+1 { + // right on the threshold - don't expect a failure yet + continue + } + + if len(decoded) != 1 { + t.Fatal("expected 1 item in returned json") + } + if decoded[checkedServer.URL] != "downstream service returned unexpected status: 500" { + t.Fatal("did not get expected result for health check") + } + + break + } + + // Signal HTTP handler to start returning 200 + close(stopFailing) + + <-time.After(2 * interval) + resp, err := http.Get(debugServer.URL + "/debug/health") + if err != nil { + t.Fatalf("error performing HTTP GET: %v", err) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading HTTP body: %v", err) + } + resp.Body.Close() + var decoded map[string]string + err = json.Unmarshal(body, &decoded) + if err != nil { + t.Fatalf("error unmarshaling json: %v", err) + } + if len(decoded) != 0 { + t.Fatal("expected 0 items in returned json") + } +} From 68e8532cefe7c27cee9cc07fb3d2d781ead65fec Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 19 Aug 2015 14:12:51 -0700 Subject: [PATCH 273/501] Add storagedriver section to health check configuration Add default storagedriver health check to example configuration files with parameters matching the previous hardcoded configuration. Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 8b8543dd..9cf6447a 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -235,10 +235,23 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // implementing this properly will require a refactor. This method may panic // if called twice in the same process. 
func (app *App) RegisterHealthChecks() { - health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), defaultCheckInterval, 3, func() error { - _, err := app.driver.List(app, "/") // "/" should always exist - return err // any error will be treated as failure - }) + if app.Config.Health.StorageDriver.Enabled { + interval := app.Config.Health.StorageDriver.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + storageDriverCheck := func() error { + _, err := app.driver.List(app, "/") // "/" should always exist + return err // any error will be treated as failure + } + + if app.Config.Health.StorageDriver.Threshold != 0 { + health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) + } else { + health.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) + } + } for _, fileChecker := range app.Config.Health.FileCheckers { interval := fileChecker.Interval From bbd4699166bcf57ce025b66b934fba03d39e9753 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 19 Aug 2015 14:24:31 -0700 Subject: [PATCH 274/501] Switch tests to import "github.com/docker/distribution/context" Signed-off-by: Aaron Lehmann --- docs/auth/silly/access_test.go | 2 +- docs/auth/token/token_test.go | 2 +- docs/handlers/api_test.go | 2 +- docs/handlers/app_test.go | 2 +- docs/handlers/health_test.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/auth/silly/access_test.go b/docs/auth/silly/access_test.go index 8b5ecb80..ff2155b1 100644 --- a/docs/auth/silly/access_test.go +++ b/docs/auth/silly/access_test.go @@ -5,8 +5,8 @@ import ( "net/http/httptest" "testing" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" - "golang.org/x/net/context" ) func TestSillyAccessController(t *testing.T) { diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go index 9d84d4ef..119aa738 100644 --- a/docs/auth/token/token_test.go +++ b/docs/auth/token/token_test.go @@ -15,9 +15,9 @@ import ( "testing" "time" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" - "golang.org/x/net/context" ) func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index e351cb95..a975bd33 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/registry/api/errcode" @@ -27,7 +28,6 @@ import ( "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "github.com/gorilla/handlers" - "golang.org/x/net/context" ) var headerConfig = http.Header{ diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index fbb0b188..0038a97d 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" @@ -16,7 +17,6 @@ import ( "github.com/docker/distribution/registry/storage" memorycache 
"github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" - "golang.org/x/net/context" ) // TestAppDispatcher builds an application with a test dispatcher and ensures diff --git a/docs/handlers/health_test.go b/docs/handlers/health_test.go index ce5860a8..38ea9b2f 100644 --- a/docs/handlers/health_test.go +++ b/docs/handlers/health_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" "github.com/docker/distribution/health" - "golang.org/x/net/context" ) func TestFileHealthCheck(t *testing.T) { From cdc3143b7e8dfc52223fb34bc611842662b942cb Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 19 Aug 2015 15:11:10 -0700 Subject: [PATCH 275/501] Expose a Registry type in health package, so unit tests can stay isolated from each other Update docs. Change health_test.go tests to create their own registries and register the checks there. The tests now call CheckStatus directly instead of polling the HTTP handler, which returns results from the default registry. Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 22 +++++--- docs/handlers/health_test.go | 100 +++++++---------------------------- 2 files changed, 34 insertions(+), 88 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 9cf6447a..91f4e1a3 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -234,7 +234,15 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App // process. Because the configuration and app are tightly coupled, // implementing this properly will require a refactor. This method may panic // if called twice in the same process. -func (app *App) RegisterHealthChecks() { +func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { + if len(healthRegistries) > 1 { + panic("RegisterHealthChecks called with more than one registry") + } + healthRegistry := health.DefaultRegistry + if len(healthRegistries) == 1 { + healthRegistry = healthRegistries[0] + } + if app.Config.Health.StorageDriver.Enabled { interval := app.Config.Health.StorageDriver.Interval if interval == 0 { @@ -247,9 +255,9 @@ func (app *App) RegisterHealthChecks() { } if app.Config.Health.StorageDriver.Threshold != 0 { - health.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) + healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) } else { - health.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) + healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) } } @@ -260,10 +268,10 @@ func (app *App) RegisterHealthChecks() { } if fileChecker.Threshold != 0 { ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d, threshold=%d", fileChecker.File, interval/time.Second, fileChecker.Threshold) - health.Register(fileChecker.File, health.PeriodicThresholdChecker(checks.FileChecker(fileChecker.File), interval, fileChecker.Threshold)) + healthRegistry.Register(fileChecker.File, health.PeriodicThresholdChecker(checks.FileChecker(fileChecker.File), interval, fileChecker.Threshold)) } else { ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) - 
health.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) + healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) } } @@ -274,10 +282,10 @@ func (app *App) RegisterHealthChecks() { } if httpChecker.Threshold != 0 { ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) - health.Register(httpChecker.URI, health.PeriodicThresholdChecker(checks.HTTPChecker(httpChecker.URI), interval, httpChecker.Threshold)) + healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checks.HTTPChecker(httpChecker.URI), interval, httpChecker.Threshold)) } else { ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) - health.Register(httpChecker.URI, health.PeriodicChecker(checks.HTTPChecker(httpChecker.URI), interval)) + healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checks.HTTPChecker(httpChecker.URI), interval)) } } } diff --git a/docs/handlers/health_test.go b/docs/handlers/health_test.go index 38ea9b2f..de2b71cc 100644 --- a/docs/handlers/health_test.go +++ b/docs/handlers/health_test.go @@ -1,7 +1,6 @@ package handlers import ( - "encoding/json" "io/ioutil" "net/http" "net/http/httptest" @@ -15,9 +14,6 @@ import ( ) func TestFileHealthCheck(t *testing.T) { - // In case other tests registered checks before this one - health.UnregisterAll() - interval := time.Second tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") @@ -43,60 +39,29 @@ func TestFileHealthCheck(t *testing.T) { ctx := context.Background() app := NewApp(ctx, config) - app.RegisterHealthChecks() - - debugServer := httptest.NewServer(nil) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) // Wait for health check to happen <-time.After(2 * interval) - resp, err := http.Get(debugServer.URL + "/debug/health") - if err != nil { - t.Fatalf("error performing HTTP GET: %v", err) + status := healthRegistry.CheckStatus() + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading HTTP body: %v", err) - } - resp.Body.Close() - var decoded map[string]string - err = json.Unmarshal(body, &decoded) - if err != nil { - t.Fatalf("error unmarshaling json: %v", err) - } - if len(decoded) != 1 { - t.Fatal("expected 1 item in returned json") - } - if decoded[tmpfile.Name()] != "file exists" { + if status[tmpfile.Name()] != "file exists" { t.Fatal(`did not get "file exists" result for health check`) } os.Remove(tmpfile.Name()) <-time.After(2 * interval) - resp, err = http.Get(debugServer.URL + "/debug/health") - if err != nil { - t.Fatalf("error performing HTTP GET: %v", err) - } - body, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading HTTP body: %v", err) - } - resp.Body.Close() - var decoded2 map[string]string - err = json.Unmarshal(body, &decoded2) - if err != nil { - t.Fatalf("error unmarshaling json: %v", err) - } - if len(decoded2) != 0 { - t.Fatal("expected 0 items in returned json") + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") } } func TestHTTPHealthCheck(t *testing.T) { - // In case other tests registered checks before this one - health.UnregisterAll() - interval := time.Second threshold := 3 @@ -132,32 +97,18 @@ func 
TestHTTPHealthCheck(t *testing.T) { ctx := context.Background() app := NewApp(ctx, config) - app.RegisterHealthChecks() - - debugServer := httptest.NewServer(nil) + healthRegistry := health.NewRegistry() + app.RegisterHealthChecks(healthRegistry) for i := 0; ; i++ { <-time.After(interval) - resp, err := http.Get(debugServer.URL + "/debug/health") - if err != nil { - t.Fatalf("error performing HTTP GET: %v", err) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading HTTP body: %v", err) - } - resp.Body.Close() - var decoded map[string]string - err = json.Unmarshal(body, &decoded) - if err != nil { - t.Fatalf("error unmarshaling json: %v", err) - } + status := healthRegistry.CheckStatus() if i < threshold-1 { // definitely shouldn't have hit the threshold yet - if len(decoded) != 0 { - t.Fatal("expected 1 items in returned json") + if len(status) != 0 { + t.Fatal("expected 1 item in health check results") } continue } @@ -166,10 +117,10 @@ func TestHTTPHealthCheck(t *testing.T) { continue } - if len(decoded) != 1 { - t.Fatal("expected 1 item in returned json") + if len(status) != 1 { + t.Fatal("expected 1 item in health check results") } - if decoded[checkedServer.URL] != "downstream service returned unexpected status: 500" { + if status[checkedServer.URL] != "downstream service returned unexpected status: 500" { t.Fatal("did not get expected result for health check") } @@ -180,21 +131,8 @@ func TestHTTPHealthCheck(t *testing.T) { close(stopFailing) <-time.After(2 * interval) - resp, err := http.Get(debugServer.URL + "/debug/health") - if err != nil { - t.Fatalf("error performing HTTP GET: %v", err) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("error reading HTTP body: %v", err) - } - resp.Body.Close() - var decoded map[string]string - err = json.Unmarshal(body, &decoded) - if err != nil { - t.Fatalf("error unmarshaling json: %v", err) - } - if len(decoded) != 0 { - t.Fatal("expected 0 items in returned json") + + if len(healthRegistry.CheckStatus()) != 0 { + t.Fatal("expected 0 items in health check results") } } From ca3d460278e7e0df31428e349aa1a761dd68f826 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 19 Aug 2015 17:57:18 -0700 Subject: [PATCH 276/501] Add a TCP health checker Also, add timeout and status code parameters to the HTTP checker, and remove the threshold parameter for the file checker. 
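With this change the checkers take their tuning parameters at
construction time; a rough sketch of the resulting usage (addresses,
URIs, and durations are hypothetical):

    // Dial the address on every interval, giving up after the timeout.
    tcp := checks.TCPChecker("127.0.0.1:5000", 500*time.Millisecond)
    healthRegistry.Register("127.0.0.1:5000", health.PeriodicChecker(tcp, 10*time.Second))

    // Expect a specific status code from the URI, bounded by a timeout.
    httpCheck := checks.HTTPChecker("http://backend/health", 200, time.Second)
    healthRegistry.Register("http://backend/health",
        health.PeriodicThresholdChecker(httpCheck, 10*time.Second, 3))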
Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 38 ++++++++++++++++------ docs/handlers/health_test.go | 63 ++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 9 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 91f4e1a3..24f43f37 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -266,13 +266,8 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { if interval == 0 { interval = defaultCheckInterval } - if fileChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d, threshold=%d", fileChecker.File, interval/time.Second, fileChecker.Threshold) - healthRegistry.Register(fileChecker.File, health.PeriodicThresholdChecker(checks.FileChecker(fileChecker.File), interval, fileChecker.Threshold)) - } else { - ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) - healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) - } + ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) } for _, httpChecker := range app.Config.Health.HTTPCheckers { @@ -280,12 +275,37 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { if interval == 0 { interval = defaultCheckInterval } + + statusCode := httpChecker.StatusCode + if statusCode == 0 { + statusCode = 200 + } + + checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout) + if httpChecker.Threshold != 0 { ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) - healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checks.HTTPChecker(httpChecker.URI), interval, httpChecker.Threshold)) + healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) } else { ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) - healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checks.HTTPChecker(httpChecker.URI), interval)) + healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) + } + } + + for _, tcpChecker := range app.Config.Health.TCPCheckers { + interval := tcpChecker.Interval + if interval == 0 { + interval = defaultCheckInterval + } + + checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) + + if tcpChecker.Threshold != 0 { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) + } else { + ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) + healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) } } } diff --git a/docs/handlers/health_test.go b/docs/handlers/health_test.go index de2b71cc..bb460b47 100644 --- a/docs/handlers/health_test.go +++ b/docs/handlers/health_test.go @@ -2,6 +2,7 @@ package handlers import ( "io/ioutil" + "net" "net/http" "net/http/httptest" "os" @@ -61,6 +62,68 @@ 
func TestFileHealthCheck(t *testing.T) {
 }
 
+func TestTCPHealthCheck(t *testing.T) {
+	interval := time.Second
+
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatalf("could not create listener: %v", err)
+	}
+	addrStr := ln.Addr().String()
+
+	// Start accepting
+	go func() {
+		for {
+			conn, err := ln.Accept()
+			if err != nil {
+				// listener was closed
+				return
+			}
+			defer conn.Close()
+		}
+	}()
+
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": configuration.Parameters{},
+		},
+		Health: configuration.Health{
+			TCPCheckers: []configuration.TCPChecker{
+				{
+					Interval: interval,
+					Addr:     addrStr,
+					Timeout:  500 * time.Millisecond,
+				},
+			},
+		},
+	}
+
+	ctx := context.Background()
+
+	app := NewApp(ctx, config)
+	healthRegistry := health.NewRegistry()
+	app.RegisterHealthChecks(healthRegistry)
+
+	// Wait for health check to happen
+	<-time.After(2 * interval)
+
+	if len(healthRegistry.CheckStatus()) != 0 {
+		t.Fatal("expected 0 items in health check results")
+	}
+
+	ln.Close()
+	<-time.After(2 * interval)
+
+	// Health check should now fail
+	status := healthRegistry.CheckStatus()
+	if len(status) != 1 {
+		t.Fatal("expected 1 item in health check results")
+	}
+	if status[addrStr] != "connection to "+addrStr+" failed" {
+		t.Fatal(`did not get "connection failed" result for health check`)
+	}
+}
+
 func TestHTTPHealthCheck(t *testing.T) {
 	interval := time.Second
 	threshold := 3
From 5b804f76009e7a4df08b3e5dcb6ebf4dac8151c4 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Wed, 19 Aug 2015 18:23:58 -0700
Subject: [PATCH 277/501] Add headers parameter for HTTP checker

Signed-off-by: Aaron Lehmann
---
 docs/handlers/app.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/handlers/app.go b/docs/handlers/app.go
index 24f43f37..b1e46b02 100644
--- a/docs/handlers/app.go
+++ b/docs/handlers/app.go
@@ -281,7 +281,7 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) {
 			statusCode = 200
 		}
 
-		checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout)
+		checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers)
 
 		if httpChecker.Threshold != 0 {
 			ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold)
From 7dd03e12bbc8ac5a426881260352f1eeb7b78cf6 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 20 Aug 2015 17:36:24 -0700
Subject: [PATCH 278/501] More consistent return from ErrorCode.Error()

To bring ErrorCode into line with Go conventions, ErrorCode.Error() now
returns the "nice" value of the error code. This ensures error message
assembly works similarly to commonly used Go conventions when directly
using ErrorCode as an error.

Signed-off-by: Stephen J Day
---
 docs/api/errcode/errors.go      |  7 ++---
 docs/api/errcode/errors_test.go | 52 ++++++++++++++++++---------------
 2 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go
index fdaddbcf..9a405d21 100644
--- a/docs/api/errcode/errors.go
+++ b/docs/api/errcode/errors.go
@@ -25,7 +25,8 @@ func (ec ErrorCode) ErrorCode() ErrorCode {
 
 // Error returns the ID/Value
 func (ec ErrorCode) Error() string {
-	return ec.Descriptor().Value
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+ return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) } // Descriptor returns the descriptor for the error code. @@ -104,9 +105,7 @@ func (e Error) ErrorCode() ErrorCode { // Error returns a human readable representation of the error. func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) } // WithDetail will return a new Error, based on the current one, but with diff --git a/docs/api/errcode/errors_test.go b/docs/api/errcode/errors_test.go index 27fb1cec..54e7a736 100644 --- a/docs/api/errcode/errors_test.go +++ b/docs/api/errcode/errors_test.go @@ -4,9 +4,33 @@ import ( "encoding/json" "net/http" "reflect" + "strings" "testing" ) +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { @@ -56,33 +80,15 @@ func TestErrorCodes(t *testing.T) { if ecUnmarshaled != ec { t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) } + + expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) + if ec.Error() != expectedErrorString { + t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) + } } } -// TestErrorsManagement does a quick check of the Errors type to ensure that -// members are properly pushed and marshaled. -var ErrorCodeTest1 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST1", - Message: "test error 1", - Description: `Just a test message #1.`, - HTTPStatusCode: http.StatusInternalServerError, -}) - -var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST2", - Message: "test error 2", - Description: `Just a test message #2.`, - HTTPStatusCode: http.StatusNotFound, -}) - -var ErrorCodeTest3 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST3", - Message: "Sorry %q isn't valid", - Description: `Just a test message #3.`, - HTTPStatusCode: http.StatusNotFound, -}) - func TestErrorsManagement(t *testing.T) { var errs Errors From bb098c72a2bba06089bf54957a1d8b3b73bed49b Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 20 Aug 2015 21:24:30 -0700 Subject: [PATCH 279/501] Move manifest package to schema1 As we begin our march towards multi-arch, we must prepare for the reality of multiple manifest schemas. This is the beginning of a set of changes to facilitate this. We are both moving this package into its target position where it may live peacefully next to other manfiest versions. 
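For consumers the rename is mechanical, as the diffs below show; roughly:

    // before
    import "github.com/docker/distribution/manifest"

    var sm *manifest.SignedManifest

    // after: schema1-specific types move under manifest/schema1, while
    // shared pieces such as manifest.Versioned remain where they are
    import (
        "github.com/docker/distribution/manifest"
        "github.com/docker/distribution/manifest/schema1"
    )

    var sm *schema1.SignedManifest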
Signed-off-by: Stephen J Day --- docs/client/repository.go | 10 +++++----- docs/client/repository_test.go | 17 +++++++++-------- docs/handlers/api_test.go | 25 +++++++++++++------------ docs/handlers/images.go | 8 ++++---- docs/proxy/proxymanifeststore.go | 12 ++++++------ docs/proxy/proxymanifeststore_test.go | 11 ++++++----- docs/storage/manifeststore.go | 12 ++++++------ docs/storage/manifeststore_test.go | 11 ++++++----- docs/storage/revisionstore.go | 10 +++++----- 9 files changed, 60 insertions(+), 56 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index c1e8e07f..bbf53ce2 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -14,7 +14,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" @@ -242,7 +242,7 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { return false, handleErrorResponse(resp) } -func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { +func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { // Call by Tag endpoint since the API uses the same // URL endpoint for tags and digests. return ms.GetByTag(dgst.String()) @@ -262,7 +262,7 @@ func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { } } -func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { +func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { for _, option := range options { err := option(ms) if err != nil { @@ -290,7 +290,7 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic if resp.StatusCode == http.StatusNotModified { return nil, nil } else if SuccessStatus(resp.StatusCode) { - var sm manifest.SignedManifest + var sm schema1.SignedManifest decoder := json.NewDecoder(resp.Body) if err := decoder.Decode(&sm); err != nil { @@ -301,7 +301,7 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic return nil, handleErrorResponse(resp) } -func (ms *manifests) Put(m *manifest.SignedManifest) error { +func (ms *manifests) Put(m *schema1.SignedManifest) error { manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) if err != nil { return err diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 8a7a598e..c5a4d6a5 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -20,6 +20,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/testutil" ) @@ -419,19 +420,19 @@ func TestBlobUploadMonolithic(t *testing.T) { } } -func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { - blobs := make([]manifest.FSLayer, blobCount) - history := make([]manifest.History, blobCount) +func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest) { + blobs := make([]schema1.FSLayer, blobCount) + history := 
make([]schema1.History, blobCount) for i := 0; i < blobCount; i++ { dgst, blob := newRandomBlob((i % 5) * 16) - blobs[i] = manifest.FSLayer{BlobSum: dgst} - history[i] = manifest.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} + blobs[i] = schema1.FSLayer{BlobSum: dgst} + history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} } - m := &manifest.SignedManifest{ - Manifest: manifest.Manifest{ + m := &schema1.SignedManifest{ + Manifest: schema1.Manifest{ Name: name, Tag: tag, Architecture: "x86", @@ -521,7 +522,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request } -func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { +func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { if m1.Name != m2.Name { return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index a975bd33..3473baf5 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -22,6 +22,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -648,7 +649,7 @@ func httpDelete(url string) (*http.Response, error) { type manifestArgs struct { imageName string - signedManifest *manifest.SignedManifest + signedManifest *schema1.SignedManifest dgst digest.Digest } @@ -741,13 +742,13 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m // -------------------------------- // Attempt to push unsigned manifest with missing layers - unsignedManifest := &manifest.Manifest{ + unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: imageName, Tag: tag, - FSLayers: []manifest.FSLayer{ + FSLayers: []schema1.FSLayer{ { BlobSum: "asdf", }, @@ -797,7 +798,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m // ------------------- // Push the signed manifest with all layers pushed. 
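 	// (schema1.Sign wraps the manifest in a libtrust JWS envelope signed with
 	// the test environment's private key; the registry verifies that envelope
 	// when the manifest is PUT.)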
- signedManifest, err := manifest.Sign(unsignedManifest, env.pk) + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } @@ -844,7 +845,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) - var fetchedManifest manifest.SignedManifest + var fetchedManifest schema1.SignedManifest dec := json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) @@ -866,7 +867,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) - var fetchedManifestByDigest manifest.SignedManifest + var fetchedManifestByDigest schema1.SignedManifest dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifestByDigest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) @@ -1062,7 +1063,7 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { var body []byte - if sm, ok := v.(*manifest.SignedManifest); ok { + if sm, ok := v.(*schema1.SignedManifest); ok { body = sm.Raw } else { var err error @@ -1355,13 +1356,13 @@ func checkErr(t *testing.T, err error, msg string) { } func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { - unsignedManifest := &manifest.Manifest{ + unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: imageName, Tag: tag, - FSLayers: []manifest.FSLayer{ + FSLayers: []schema1.FSLayer{ { BlobSum: "asdf", }, @@ -1389,7 +1390,7 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } - signedManifest, err := manifest.Sign(unsignedManifest, env.pk) + signedManifest, err := schema1.Sign(unsignedManifest, env.pk) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } @@ -1425,13 +1426,13 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { } // Manifest upload - unsignedManifest := &manifest.Manifest{ + unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: imageName, Tag: tag, - FSLayers: []manifest.FSLayer{}, + FSLayers: []schema1.FSLayer{}, } resp := putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) diff --git a/docs/handlers/images.go b/docs/handlers/images.go index f4f0db89..e1931730 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" @@ -57,7 +57,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } - var sm *manifest.SignedManifest + var sm *schema1.SignedManifest if imh.Tag != "" { sm, err = manifests.GetByTag(imh.Tag) } else { @@ -119,7 +119,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - var manifest manifest.SignedManifest + var 
manifest schema1.SignedManifest if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return @@ -229,7 +229,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h // digestManifest takes a digest of the given manifest. This belongs somewhere // better but we'll wait for a refactoring cycle to find that real somewhere. -func digestManifest(ctx context.Context, sm *manifest.SignedManifest) (digest.Digest, error) { +func digestManifest(ctx context.Context, sm *schema1.SignedManifest) (digest.Digest, error) { p, err := sm.Payload() if err != nil { if !strings.Contains(err.Error(), "missing signature key") { diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index e314e84f..1400cf02 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -36,7 +36,7 @@ func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) { return pms.remoteManifests.Exists(dgst) } -func (pms proxyManifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { +func (pms proxyManifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { sm, err := pms.localManifests.Get(dgst) if err == nil { proxyMetrics.ManifestPush(uint64(len(sm.Raw))) @@ -81,7 +81,7 @@ func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) { return pms.remoteManifests.ExistsByTag(tag) } -func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { +func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { var localDigest digest.Digest localManifest, err := pms.localManifests.GetByTag(tag, options...) 
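Stripped of diff noise, the read path of this proxy store is: try the local
manifests, revalidate any local copy against the remote using the local digest
as an ETag, and otherwise pull and cache the remote copy. A condensed sketch of
that flow — not the literal code, and with error handling elided:

	localManifest, err := pms.localManifests.GetByTag(tag, options...)
	localDigest, _ := manifestDigest(localManifest) // when a local copy exists
	sm, err := pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String()))
	if sm == nil && err == nil {
		// the remote answered 304 Not Modified: the local copy is current
		return localManifest, nil
	}
	// otherwise store sm locally, schedule its expiry, and return it

At this point in the series a 304 still surfaces as a nil manifest with a nil
error; patch 288 below replaces that with distribution.ErrManifestNotModified.
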
@@ -100,7 +100,7 @@ func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.Manif } fromremote: - var sm *manifest.SignedManifest + var sm *schema1.SignedManifest sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) if err != nil { return nil, err @@ -130,7 +130,7 @@ fromremote: return sm, err } -func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { +func manifestDigest(sm *schema1.SignedManifest) (digest.Digest, error) { payload, err := sm.Payload() if err != nil { return "", err @@ -145,7 +145,7 @@ func manifestDigest(sm *manifest.SignedManifest) (digest.Digest, error) { return dgst, nil } -func (pms proxyManifestStore) Put(manifest *manifest.SignedManifest) error { +func (pms proxyManifestStore) Put(manifest *schema1.SignedManifest) error { return distribution.ErrUnsupported } diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 9d5f3f66..6e0fc51e 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" @@ -51,17 +52,17 @@ func (sm statsManifest) ExistsByTag(tag string) (bool, error) { return sm.manifests.ExistsByTag(tag) } -func (sm statsManifest) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { +func (sm statsManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { sm.stats["get"]++ return sm.manifests.Get(dgst) } -func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { +func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { sm.stats["getbytag"]++ return sm.manifests.GetByTag(tag, options...) 
} -func (sm statsManifest) Put(manifest *manifest.SignedManifest) error { +func (sm statsManifest) Put(manifest *schema1.SignedManifest) error { sm.stats["put"]++ return sm.manifests.Put(manifest) } @@ -126,7 +127,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE } func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) { - m := manifest.Manifest{ + m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, @@ -159,7 +160,7 @@ func populateRepo(t *testing.T, ctx context.Context, repository distribution.Rep t.Fatalf("unexpected error generating private key: %v", err) } - sm, err := manifest.Sign(&m, pk) + sm, err := schema1.Sign(&m, pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index c8c19d43..db49aaa4 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/libtrust" ) @@ -35,7 +35,7 @@ func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { return true, nil } -func (ms *manifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) { +func (ms *manifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") return ms.revisionStore.get(ms.ctx, dgst) } @@ -50,7 +50,7 @@ func SkipLayerVerification(ms distribution.ManifestService) error { return fmt.Errorf("skip layer verification only valid for manifeststore") } -func (ms *manifestStore) Put(manifest *manifest.SignedManifest) error { +func (ms *manifestStore) Put(manifest *schema1.SignedManifest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") if err := ms.verifyManifest(ms.ctx, manifest); err != nil { @@ -83,7 +83,7 @@ func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { return ms.tagStore.exists(tag) } -func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) { +func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { for _, option := range options { err := option(ms) if err != nil { @@ -104,13 +104,13 @@ func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestSe // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid // content, leaving trust policies of that content up to consumers. 
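 // (Concretely: a manifest whose name does not match the repository, or whose
 // JWS envelope fails schema1.Verify, is rejected here before anything is
 // written to the revision store.)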
-func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *manifest.SignedManifest) error { +func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { var errs distribution.ErrManifestVerification if mnfst.Name != ms.repository.Name() { errs = append(errs, fmt.Errorf("repository name does not match manifest name")) } - if _, err := manifest.Verify(mnfst); err != nil { + if _, err := schema1.Verify(mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: errs = append(errs, distribution.ErrManifestUnverified{}) diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 7665c5c8..30126e4b 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -75,7 +76,7 @@ func TestManifestStorage(t *testing.T) { } } - m := manifest.Manifest{ + m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, @@ -94,7 +95,7 @@ func TestManifestStorage(t *testing.T) { dgst := digest.Digest(ds) testLayers[digest.Digest(dgst)] = rs - m.FSLayers = append(m.FSLayers, manifest.FSLayer{ + m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) } @@ -104,7 +105,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error generating private key: %v", err) } - sm, merr := manifest.Sign(&m, pk) + sm, merr := schema1.Sign(&m, pk) if merr != nil { t.Fatalf("error signing manifest: %v", err) } @@ -232,7 +233,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error generating private key: %v", err) } - sm2, err := manifest.Sign(&m, pk2) + sm2, err := schema1.Sign(&m, pk2) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } @@ -260,7 +261,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error fetching manifest: %v", err) } - if _, err := manifest.Verify(fetched); err != nil { + if _, err := schema1.Verify(fetched); err != nil { t.Fatalf("unexpected error verifying manifest: %v", err) } diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go index 9dea78e8..ed2d5dd3 100644 --- a/docs/storage/revisionstore.go +++ b/docs/storage/revisionstore.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" "github.com/docker/libtrust" ) @@ -18,7 +18,7 @@ type revisionStore struct { } // get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*manifest.SignedManifest, error) { +func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*schema1.SignedManifest, error) { // Ensure that this revision is available in this repository. 
_, err := rs.blobStore.Stat(ctx, revision) if err != nil { @@ -64,7 +64,7 @@ func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*mani return nil, err } - var sm manifest.SignedManifest + var sm schema1.SignedManifest if err := json.Unmarshal(raw, &sm); err != nil { return nil, err } @@ -74,7 +74,7 @@ func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*mani // put stores the manifest in the repository, if not already present. Any // updated signatures will be stored, as well. -func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) (distribution.Descriptor, error) { +func (rs *revisionStore) put(ctx context.Context, sm *schema1.SignedManifest) (distribution.Descriptor, error) { // Resolve the payload in the manifest. payload, err := sm.Payload() if err != nil { @@ -82,7 +82,7 @@ func (rs *revisionStore) put(ctx context.Context, sm *manifest.SignedManifest) ( } // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.Put(ctx, manifest.ManifestMediaType, payload) + revision, err := rs.blobStore.Put(ctx, schema1.ManifestMediaType, payload) if err != nil { context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return distribution.Descriptor{}, err From 6e7718dfce492f78916389561abc7764af646a1a Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 26 Aug 2015 19:00:28 -0700 Subject: [PATCH 280/501] Correctly sanitize location url preserving parameters Signed-off-by: Stephen J Day --- docs/client/repository.go | 21 ++++++---------- docs/client/repository_test.go | 46 ++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 14 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index c1e8e07f..56d86df0 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -358,25 +358,18 @@ type blobs struct { distribution.BlobDeleter } -func sanitizeLocation(location, source string) (string, error) { +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + locationURL, err := url.Parse(location) if err != nil { return "", err } - if locationURL.Scheme == "" { - sourceURL, err := url.Parse(source) - if err != nil { - return "", err - } - locationURL = &url.URL{ - Scheme: sourceURL.Scheme, - Host: sourceURL.Host, - Path: location, - } - location = locationURL.String() - } - return location, nil + return baseURL.ResolveReference(locationURL).String(), nil } func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 8a7a598e..384a2311 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -856,3 +856,49 @@ func TestCatalogInParts(t *testing.T) { t.Fatalf("Got wrong number of repos") } } + +func TestSanitizeLocation(t *testing.T) { + for _, testcase := range []struct { + description string + location string + source string + expected string + err error + }{ + { + description: "ensure relative location correctly resolved", + location: "/v2/foo/baasdf", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf", + }, + { + description: "ensure parameters are preserved", + location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + source: "http://blahalaja.com/v1", + expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", + }, + { + description: 
"ensure new hostname overidden", + location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + source: "http://blahalaja.com/v1", + expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", + }, + } { + fatalf := func(format string, args ...interface{}) { + t.Fatalf(testcase.description+": "+format, args...) + } + + s, err := sanitizeLocation(testcase.location, testcase.source) + if err != testcase.err { + if testcase.err != nil { + fatalf("expected error: %v != %v", err, testcase) + } else { + fatalf("unexpected error sanitizing: %v", err) + } + } + + if s != testcase.expected { + fatalf("bad sanitize: %q != %q", s, testcase.expected) + } + } +} From 8f5f6a4e590e8fb91bf9516c9da9a7bf24e81144 Mon Sep 17 00:00:00 2001 From: Hua Wang Date: Mon, 24 Aug 2015 23:08:33 +0800 Subject: [PATCH 281/501] Add TrustId parameter to swift driver github/ncw/swift has added support for trust, so let's add it. Signed-off-by: Hua Wang --- docs/storage/driver/swift/swift.go | 2 ++ docs/storage/driver/swift/swift_test.go | 3 +++ 2 files changed, 5 insertions(+) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 0921ccc0..38c41b3d 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -61,6 +61,7 @@ type Parameters struct { TenantID string Domain string DomainID string + TrustID string Region string Container string Prefix string @@ -156,6 +157,7 @@ func New(params Parameters) (*Driver, error) { TenantId: params.TenantID, Domain: params.Domain, DomainId: params.DomainID, + TrustId: params.TrustID, Transport: transport, ConnectTimeout: 60 * time.Second, Timeout: 15 * 60 * time.Second, diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 6be2238a..705c2631 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -29,6 +29,7 @@ func init() { tenantID string domain string domainID string + trustID string container string region string insecureSkipVerify bool @@ -42,6 +43,7 @@ func init() { tenantID = os.Getenv("SWIFT_TENANT_ID") domain = os.Getenv("SWIFT_DOMAIN_NAME") domainID = os.Getenv("SWIFT_DOMAIN_ID") + trustID = os.Getenv("SWIFT_TRUST_ID") container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) @@ -71,6 +73,7 @@ func init() { tenantID, domain, domainID, + trustID, region, container, root, From 8ceca304b02f4fec848795eddb8c6d2081e6b9b7 Mon Sep 17 00:00:00 2001 From: Jack Griffin Date: Thu, 3 Sep 2015 01:31:47 +0000 Subject: [PATCH 282/501] Skip creating swift container if already exists Signed-off-by: Jack Griffin --- docs/storage/driver/swift/swift.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 0921ccc0..5871437f 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -165,8 +165,12 @@ func New(params Parameters) (*Driver, error) { return nil, fmt.Errorf("Swift authentication failed: %s", err) } - if err := ct.ContainerCreate(params.Container, nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { + if err := ct.ContainerCreate(params.Container, nil); err != nil { + return nil, fmt.Errorf("Failed to create container %s (%s)", 
params.Container, err) + } + } else if err != nil { + return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) } d := &driver{ From 8dd51d64603b8682222d6d1ce50f4939fdd04c57 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 20 Aug 2015 13:56:36 -0700 Subject: [PATCH 283/501] Move initialization code from main.go to the registry package This makes it easier to embed a registry instance inside another application. Signed-off-by: Aaron Lehmann --- docs/doc.go | 3 +- docs/handlers/api_test.go | 2 +- docs/handlers/app.go | 12 +- docs/handlers/app_test.go | 4 +- docs/handlers/health_test.go | 6 +- docs/registry.go | 294 +++++++++++++++++++++++++++++++++++ 6 files changed, 307 insertions(+), 14 deletions(-) create mode 100644 docs/registry.go diff --git a/docs/doc.go b/docs/doc.go index 1c01e42e..a1ba7f3a 100644 --- a/docs/doc.go +++ b/docs/doc.go @@ -1,3 +1,2 @@ -// Package registry is a placeholder package for registry interface -// definitions and utilities. +// Package registry provides the main entrypoints for running a registry. package registry diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 3473baf5..52a74a2b 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1038,7 +1038,7 @@ func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { ctx := context.Background() - app := NewApp(ctx, *config) + app := NewApp(ctx, config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index b1e46b02..8c67c20b 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -47,7 +47,7 @@ const defaultCheckInterval = 10 * time.Second type App struct { context.Context - Config configuration.Configuration + Config *configuration.Configuration router *mux.Router // main application router, configured with dispatchers driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. @@ -69,7 +69,7 @@ type App struct { // NewApp takes a configuration and returns a configured app, ready to serve // requests. The app only implements ServeHTTP and can be wrapped in other // handlers accordingly. 
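 // For example, a program embedding the registry (hypothetical caller, using
 // the pointer-config signature introduced below):
 //
 //	app := handlers.NewApp(ctx, &config)
 //	log.Fatal(http.ListenAndServe(":5000", app))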
-func NewApp(ctx context.Context, configuration configuration.Configuration) *App { +func NewApp(ctx context.Context, configuration *configuration.Configuration) *App { app := &App{ Config: configuration, Context: ctx, @@ -117,10 +117,10 @@ func NewApp(ctx context.Context, configuration configuration.Configuration) *App panic(err) } - app.configureSecret(&configuration) - app.configureEvents(&configuration) - app.configureRedis(&configuration) - app.configureLogHook(&configuration) + app.configureSecret(configuration) + app.configureEvents(configuration) + app.configureRedis(configuration) + app.configureLogHook(configuration) options := []storage.RegistryOption{} diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 0038a97d..9e2514d8 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -31,7 +31,7 @@ func TestAppDispatcher(t *testing.T) { t.Fatalf("error creating registry: %v", err) } app := &App{ - Config: configuration.Configuration{}, + Config: &configuration.Configuration{}, Context: ctx, router: v2.Router(), driver: driver, @@ -164,7 +164,7 @@ func TestNewApp(t *testing.T) { // Mostly, with this test, given a sane configuration, we are simply // ensuring that NewApp doesn't panic. We might want to tweak this // behavior. - app := NewApp(ctx, config) + app := NewApp(ctx, &config) server := httptest.NewServer(app) builder, err := v2.NewURLBuilderFromString(server.URL) diff --git a/docs/handlers/health_test.go b/docs/handlers/health_test.go index bb460b47..5fe65ede 100644 --- a/docs/handlers/health_test.go +++ b/docs/handlers/health_test.go @@ -23,7 +23,7 @@ func TestFileHealthCheck(t *testing.T) { } defer tmpfile.Close() - config := configuration.Configuration{ + config := &configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, @@ -83,7 +83,7 @@ func TestTCPHealthCheck(t *testing.T) { } }() - config := configuration.Configuration{ + config := &configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, @@ -142,7 +142,7 @@ func TestHTTPHealthCheck(t *testing.T) { } })) - config := configuration.Configuration{ + config := &configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, diff --git a/docs/registry.go b/docs/registry.go new file mode 100644 index 00000000..68525040 --- /dev/null +++ b/docs/registry.go @@ -0,0 +1,294 @@ +package registry + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus/formatters/logstash" + "github.com/bugsnag/bugsnag-go" + "github.com/docker/distribution/configuration" + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/health" + "github.com/docker/distribution/registry/handlers" + "github.com/docker/distribution/registry/listener" + "github.com/docker/distribution/uuid" + "github.com/docker/distribution/version" + gorhandlers "github.com/gorilla/handlers" + "github.com/yvasiyarov/gorelic" + "golang.org/x/net/context" +) + +// A Registry represents a complete instance of the registry. +type Registry struct { + config *configuration.Configuration + app *handlers.App + server *http.Server + ln net.Listener + debugLn net.Listener +} + +// NewRegistry creates a new registry from a context and configuration struct. 
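+// A minimal embedding (hypothetical caller) looks like:
+//
+//	registry, err := NewRegistry(ctx, config)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	log.Fatalln(registry.Serve())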
+func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { + // Note this + ctx = ctxu.WithValue(ctx, "version", version.Version) + + var err error + ctx, err = configureLogging(ctx, config) + if err != nil { + return nil, fmt.Errorf("error configuring logger: %v", err) + } + + // inject a logger into the uuid library. warns us if there is a problem + // with uuid generation under low entropy. + uuid.Loggerf = ctxu.GetLogger(ctx).Warnf + + app := handlers.NewApp(ctx, config) + // TODO(aaronl): The global scope of the health checks means NewRegistry + // can only be called once per process. + app.RegisterHealthChecks() + handler := configureReporting(app) + handler = alive("/", handler) + handler = health.Handler(handler) + handler = panicHandler(handler) + handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) + + server := &http.Server{ + Handler: handler, + } + + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) + if err != nil { + return nil, err + } + + var debugLn net.Listener + if config.HTTP.Debug.Addr != "" { + debugLn, err = listener.NewListener("tcp", config.HTTP.Debug.Addr) + if err != nil { + return nil, fmt.Errorf("error listening on debug interface: %v", err) + } + log.Infof("debug server listening %v", config.HTTP.Debug.Addr) + } + + if config.HTTP.TLS.Certificate != "" { + tlsConf := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"http/1.1"}, + Certificates: make([]tls.Certificate, 1), + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return nil, err + } + + if len(config.HTTP.TLS.ClientCAs) != 0 { + pool := x509.NewCertPool() + + for _, ca := range config.HTTP.TLS.ClientCAs { + caPem, err := ioutil.ReadFile(ca) + if err != nil { + return nil, err + } + + if ok := pool.AppendCertsFromPEM(caPem); !ok { + return nil, fmt.Errorf("Could not add CA to pool") + } + } + + for _, subj := range pool.Subjects() { + ctxu.GetLogger(app).Debugf("CA Subject: %s", string(subj)) + } + + tlsConf.ClientAuth = tls.RequireAndVerifyClientCert + tlsConf.ClientCAs = pool + } + + ln = tls.NewListener(ln, tlsConf) + ctxu.GetLogger(app).Infof("listening on %v, tls", ln.Addr()) + } else { + ctxu.GetLogger(app).Infof("listening on %v", ln.Addr()) + } + + return &Registry{ + app: app, + config: config, + server: server, + ln: ln, + debugLn: debugLn, + }, nil +} + +// Serve runs the registry's HTTP server(s). 
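+// It blocks until one of the listeners fails, returning the first error
+// received on the internal channel; the deferred Close calls then shut
+// down both the main and debug listeners.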
+func (registry *Registry) Serve() error { + defer registry.ln.Close() + + errChan := make(chan error) + + if registry.debugLn != nil { + defer registry.debugLn.Close() + go func() { + errChan <- http.Serve(registry.debugLn, nil) + }() + } + + go func() { + errChan <- registry.server.Serve(registry.ln) + }() + + return <-errChan +} + +func configureReporting(app *handlers.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = app.Config.Reporting.NewRelic.Verbose + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} + +// configureLogging prepares the context with a logger using the +// configuration. +func configureLogging(ctx ctxu.Context, config *configuration.Configuration) (context.Context, error) { + if config.Log.Level == "" && config.Log.Formatter == "" { + // If no config for logging is set, fallback to deprecated "Loglevel". + log.SetLevel(logLevel(config.Loglevel)) + ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version")) + return ctx, nil + } + + log.SetLevel(logLevel(config.Log.Level)) + + formatter := config.Log.Formatter + if formatter == "" { + formatter = "text" // default formatter + } + + switch formatter { + case "json": + log.SetFormatter(&log.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "text": + log.SetFormatter(&log.TextFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + case "logstash": + log.SetFormatter(&logstash.LogstashFormatter{ + TimestampFormat: time.RFC3339Nano, + }) + default: + // just let the library use default on empty string. + if config.Log.Formatter != "" { + return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) + } + } + + if config.Log.Formatter != "" { + log.Debugf("using %q logging formatter", config.Log.Formatter) + } + + // log the application version with messages + ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version")) + + if len(config.Log.Fields) > 0 { + // build up the static fields, if present. + var fields []interface{} + for k := range config.Log.Fields { + fields = append(fields, k) + } + + ctx = ctxu.WithValues(ctx, config.Log.Fields) + ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, fields...)) + } + + return ctx, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + l = log.InfoLevel + log.Warnf("error parsing level %q: %v, using %q ", level, err, l) + } + + return l +} + +// panicHandler add a HTTP handler to web app. The handler recover the happening +// panic. logrus.Panic transmits panic message to pre-config log hooks, which is +// defined in config.yml. 
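+// Note that logrus.Panic logs and then panics again, so the request
+// goroutine still unwinds; the wrapper only guarantees that configured
+// hooks see the panic message first.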
+func panicHandler(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + log.Panic(fmt.Sprintf("%v", err)) + } + }() + handler.ServeHTTP(w, r) + }) +} + +// alive simply wraps the handler with a route that always returns an http 200 +// response when the path is matched. If the path is not matched, the request +// is passed to the provided handler. There is no guarantee of anything but +// that the server is up. Wrap with other handlers (such as health.Handler) +// for greater affect. +func alive(path string, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == path { + w.Header().Set("Cache-Control", "no-cache") + w.WriteHeader(http.StatusOK) + return + } + + handler.ServeHTTP(w, r) + }) +} From 045db61784fc401ac278a6ef56bc5cdee04975a4 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 20 Aug 2015 15:43:08 -0700 Subject: [PATCH 284/501] Add a cobra command that implements the entire main function for registry Use this command in cmd/registry/main.go. Move debug server to the main command, and change Serve to be a ListenAndServe function. Signed-off-by: Aaron Lehmann --- docs/registry.go | 145 +++++++++++++++++++++++++++++++---------------- 1 file changed, 96 insertions(+), 49 deletions(-) diff --git a/docs/registry.go b/docs/registry.go index 68525040..28a8ae18 100644 --- a/docs/registry.go +++ b/docs/registry.go @@ -5,7 +5,6 @@ import ( "crypto/x509" "fmt" "io/ioutil" - "net" "net/http" "os" "time" @@ -21,17 +20,61 @@ import ( "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" + "github.com/spf13/cobra" "github.com/yvasiyarov/gorelic" "golang.org/x/net/context" ) +// Cmd is a cobra command for running the registry. +var Cmd = &cobra.Command{ + Use: "registry ", + Short: "registry stores and distributes Docker images", + Long: "registry stores and distributes Docker images.", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + if config.HTTP.Debug.Addr != "" { + go func(addr string) { + log.Infof("debug server listening %v", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("error listening on debug interface: %v", err) + } + }(config.HTTP.Debug.Addr) + } + + registry, err := NewRegistry(context.Background(), config) + if err != nil { + log.Fatalln(err) + } + + if err = registry.ListenAndServe(); err != nil { + log.Fatalln(err) + } + }, +} + +var showVersion bool + +func init() { + Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + // A Registry represents a complete instance of the registry. +// TODO(aaronl): It might make sense for Registry to become an interface. type Registry struct { - config *configuration.Configuration - app *handlers.App - server *http.Server - ln net.Listener - debugLn net.Listener + config *configuration.Configuration + app *handlers.App + server *http.Server } // NewRegistry creates a new registry from a context and configuration struct. 
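With Cmd exported, the registry binary's main package can shrink to a thin
wrapper. A hypothetical cmd/registry/main.go under this change (the exact file
contents are not part of this patch):

	package main

	import "github.com/docker/distribution/registry"

	func main() {
		registry.Cmd.Execute()
	}
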
@@ -63,18 +106,20 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg Handler: handler, } + return &Registry{ + app: app, + config: config, + server: server, + }, nil +} + +// ListenAndServe runs the registry's HTTP server. +func (registry *Registry) ListenAndServe() error { + config := registry.config + ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) if err != nil { - return nil, err - } - - var debugLn net.Listener - if config.HTTP.Debug.Addr != "" { - debugLn, err = listener.NewListener("tcp", config.HTTP.Debug.Addr) - if err != nil { - return nil, fmt.Errorf("error listening on debug interface: %v", err) - } - log.Infof("debug server listening %v", config.HTTP.Debug.Addr) + return err } if config.HTTP.TLS.Certificate != "" { @@ -98,7 +143,7 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) if err != nil { - return nil, err + return err } if len(config.HTTP.TLS.ClientCAs) != 0 { @@ -107,16 +152,16 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg for _, ca := range config.HTTP.TLS.ClientCAs { caPem, err := ioutil.ReadFile(ca) if err != nil { - return nil, err + return err } if ok := pool.AppendCertsFromPEM(caPem); !ok { - return nil, fmt.Errorf("Could not add CA to pool") + return fmt.Errorf("Could not add CA to pool") } } for _, subj := range pool.Subjects() { - ctxu.GetLogger(app).Debugf("CA Subject: %s", string(subj)) + ctxu.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert @@ -124,38 +169,12 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg } ln = tls.NewListener(ln, tlsConf) - ctxu.GetLogger(app).Infof("listening on %v, tls", ln.Addr()) + ctxu.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) } else { - ctxu.GetLogger(app).Infof("listening on %v", ln.Addr()) + ctxu.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) } - return &Registry{ - app: app, - config: config, - server: server, - ln: ln, - debugLn: debugLn, - }, nil -} - -// Serve runs the registry's HTTP server(s). 
-func (registry *Registry) Serve() error { - defer registry.ln.Close() - - errChan := make(chan error) - - if registry.debugLn != nil { - defer registry.debugLn.Close() - go func() { - errChan <- http.Serve(registry.debugLn, nil) - }() - } - - go func() { - errChan <- registry.server.Serve(registry.ln) - }() - - return <-errChan + return registry.server.Serve(ln) } func configureReporting(app *handlers.App) http.Handler { @@ -292,3 +311,31 @@ func alive(path string, handler http.Handler) http.Handler { handler.ServeHTTP(w, r) }) } + +func resolveConfiguration(args []string) (*configuration.Configuration, error) { + var configurationPath string + + if len(args) > 0 { + configurationPath = args[0] + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + defer fp.Close() + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} From 6403bf64d56144417a1a056439866fb8c1d31918 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Fri, 11 Sep 2015 09:54:15 -0700 Subject: [PATCH 285/501] Only use the distribution/context package in registry.go This solves a issue from #909 where instance.id was not printed in logs, because this file was using the background context from golang.org/x/net/context instead of github.com/docker/distribution/context. It's cleaner to standardize on one package, so this commit removes the import of golang.org/x/net/context entirely. The Context interfaces defined in both packages are the same, so other code using golang.org/x/net/context can still pass its context to NewRegistry. Signed-off-by: Aaron Lehmann --- docs/registry.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/docs/registry.go b/docs/registry.go index 28a8ae18..cb0c8765 100644 --- a/docs/registry.go +++ b/docs/registry.go @@ -13,7 +13,7 @@ import ( "github.com/Sirupsen/logrus/formatters/logstash" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/context" "github.com/docker/distribution/health" "github.com/docker/distribution/registry/handlers" "github.com/docker/distribution/registry/listener" @@ -22,7 +22,6 @@ import ( gorhandlers "github.com/gorilla/handlers" "github.com/spf13/cobra" "github.com/yvasiyarov/gorelic" - "golang.org/x/net/context" ) // Cmd is a cobra command for running the registry. @@ -80,7 +79,7 @@ type Registry struct { // NewRegistry creates a new registry from a context and configuration struct. func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { // Note this - ctx = ctxu.WithValue(ctx, "version", version.Version) + ctx = context.WithValue(ctx, "version", version.Version) var err error ctx, err = configureLogging(ctx, config) @@ -90,7 +89,7 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. 
- uuid.Loggerf = ctxu.GetLogger(ctx).Warnf + uuid.Loggerf = context.GetLogger(ctx).Warnf app := handlers.NewApp(ctx, config) // TODO(aaronl): The global scope of the health checks means NewRegistry @@ -161,7 +160,7 @@ func (registry *Registry) ListenAndServe() error { } for _, subj := range pool.Subjects() { - ctxu.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert @@ -169,9 +168,9 @@ func (registry *Registry) ListenAndServe() error { } ln = tls.NewListener(ln, tlsConf) - ctxu.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) } else { - ctxu.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) } return registry.server.Serve(ln) @@ -215,11 +214,11 @@ func configureReporting(app *handlers.App) http.Handler { // configureLogging prepares the context with a logger using the // configuration. -func configureLogging(ctx ctxu.Context, config *configuration.Configuration) (context.Context, error) { +func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { if config.Log.Level == "" && config.Log.Formatter == "" { // If no config for logging is set, fallback to deprecated "Loglevel". log.SetLevel(logLevel(config.Loglevel)) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version")) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) return ctx, nil } @@ -255,7 +254,7 @@ func configureLogging(ctx ctxu.Context, config *configuration.Configuration) (co } // log the application version with messages - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "version")) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) if len(config.Log.Fields) > 0 { // build up the static fields, if present. @@ -264,8 +263,8 @@ func configureLogging(ctx ctxu.Context, config *configuration.Configuration) (co fields = append(fields, k) } - ctx = ctxu.WithValues(ctx, config.Log.Fields) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, fields...)) + ctx = context.WithValues(ctx, config.Log.Fields) + ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) } return ctx, nil From cabf1fd236717f8431d421a8e512018cce7b5caf Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 10 Sep 2015 20:40:01 -0700 Subject: [PATCH 286/501] Allow interface{} keys when using logger Signed-off-by: Stephen J Day --- docs/storage/blobwriter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index e0e7239c..b384fa8a 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -241,7 +241,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri if !verified { context.GetLoggerWithFields(ctx, - map[string]interface{}{ + map[interface{}]interface{}{ "canonical": canonical, "provided": desc.Digest, }, "canonical", "provided"). From 49f080acc8d4979eb2a1640111c74e31059b9b94 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 10 Sep 2015 20:41:58 -0700 Subject: [PATCH 287/501] Add WithVersion to context and other cleanup By adding WithVersion to the context package, we can simplify context setup in the application. This avoids some odd bugs where instantiation order can lead to missing instance.id or version from log messages. 
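The ordering hazard is easiest to see in a sketch (hypothetical, condensed):

	// Before: a logger derived from the context is captured before the
	// version is attached, so its messages never carry the field.
	log := context.GetLogger(ctx)
	ctx = context.WithVersion(ctx, version.Version) // too late for log

	// After: attach the version once, up front, and derive loggers later.
	ctx := context.WithVersion(context.Background(), version.Version)
	log := context.GetLogger(ctx) // carries version on every message
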
Signed-off-by: Stephen J Day --- docs/handlers/app.go | 2 -- docs/registry.go | 13 +++++-------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 8c67c20b..5103c5fb 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -77,8 +77,6 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap isCache: configuration.Proxy.RemoteURL != "", } - app.Context = ctxu.WithLogger(app.Context, ctxu.GetLogger(app, "instance.id")) - // Register the handler dispatchers. app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(apiBase) diff --git a/docs/registry.go b/docs/registry.go index cb0c8765..86cb6a17 100644 --- a/docs/registry.go +++ b/docs/registry.go @@ -35,6 +35,9 @@ var Cmd = &cobra.Command{ return } + // setup context + ctx := context.WithVersion(context.Background(), version.Version) + config, err := resolveConfiguration(args) if err != nil { fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) @@ -51,7 +54,7 @@ var Cmd = &cobra.Command{ }(config.HTTP.Debug.Addr) } - registry, err := NewRegistry(context.Background(), config) + registry, err := NewRegistry(ctx, config) if err != nil { log.Fatalln(err) } @@ -78,9 +81,6 @@ type Registry struct { // NewRegistry creates a new registry from a context and configuration struct. func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { - // Note this - ctx = context.WithValue(ctx, "version", version.Version) - var err error ctx, err = configureLogging(ctx, config) if err != nil { @@ -218,7 +218,7 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) if config.Log.Level == "" && config.Log.Formatter == "" { // If no config for logging is set, fallback to deprecated "Loglevel". log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) + ctx = context.WithLogger(ctx, context.GetLogger(ctx)) return ctx, nil } @@ -253,9 +253,6 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) log.Debugf("using %q logging formatter", config.Log.Formatter) } - // log the application version with messages - ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version")) - if len(config.Log.Fields) > 0 { // build up the static fields, if present. var fields []interface{} From b8a1ec4155ffe83bd147cfdbac1d113111aa3e8e Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Fri, 18 Sep 2015 11:00:44 -0700 Subject: [PATCH 288/501] Avoid returning nil, nil when fetching a manifest by tag by introducing a new error ErrManifestNotModified which can be checked by clients. 
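A client can now distinguish "unchanged" from "missing" explicitly — a sketch
using the identifiers from the diffs below (cachedDigest is hypothetical):

	sm, err := manifests.GetByTag("latest", client.AddEtagToTag("latest", cachedDigest.String()))
	switch {
	case err == distribution.ErrManifestNotModified:
		// the cached manifest is still current; reuse it
	case err != nil:
		return err // a real failure
	default:
		// sm holds a newer manifest
	}
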
Signed-off-by: Richard Scothern --- docs/client/repository.go | 2 +- docs/client/repository_test.go | 7 ++----- docs/proxy/proxymanifeststore.go | 4 ++-- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 2d198314..0fcb17dc 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -288,7 +288,7 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic } defer resp.Body.Close() if resp.StatusCode == http.StatusNotModified { - return nil, nil + return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { var sm schema1.SignedManifest decoder := json.NewDecoder(resp.Body) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index b211b1f9..6e4a017e 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -603,13 +603,10 @@ func TestManifestFetchWithEtag(t *testing.T) { t.Fatal(err) } - m2, err := ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) - if err != nil { + _, err = ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + if err != distribution.ErrManifestNotModified { t.Fatal(err) } - if m2 != nil { - t.Fatal("Expected empty manifest for matching etag") - } } func TestManifestDelete(t *testing.T) { diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 1400cf02..610d695e 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -102,11 +102,11 @@ func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.Manif fromremote: var sm *schema1.SignedManifest sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) - if err != nil { + if err != nil && err != distribution.ErrManifestNotModified { return nil, err } - if sm == nil { + if err == distribution.ErrManifestNotModified { context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String()) return localManifest, nil } From 9fb5fe4fbbf640fe424abf5d4c1613703288060b Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Fri, 18 Sep 2015 11:26:34 -0700 Subject: [PATCH 289/501] Don't return a nil array and a nil error if the Tags endpoint cannot be found Signed-off-by: Richard Scothern --- docs/client/repository.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 0fcb17dc..1e189438 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -211,8 +211,6 @@ func (ms *manifests) Tags() ([]string, error) { } return tagsResponse.Tags, nil - } else if resp.StatusCode == http.StatusNotFound { - return nil, nil } return nil, handleErrorResponse(resp) } From 84e7c07c42d75e81cab30c3087b0cbf7d14e02ba Mon Sep 17 00:00:00 2001 From: Nuutti Kotivuori Date: Fri, 18 Sep 2015 20:08:21 +0300 Subject: [PATCH 290/501] Remove initial access check from S3 driver In the S3 storage driver there is currently an initial access permission check by listing the bucket. If this check fails, registry will panic and exit. However, this check is broken in two ways. First of all it strips the final slash from the root directory path, meaning that any access permissions which limit access to a single directory will fail, because S3 treats the path as strict prefix match. 
Secondly, it fails to strip any leading slash that might be present, unlike the other places that access the bucket; as a result the path it checks can differ from the paths actually used, since a leading slash is allowed and significant in an S3 object name. Since there is also a periodic health check which correctly checks access permissions and shows the error more cleanly, the best solution seems to be to just remove this initial access check.

Signed-off-by: Nuutti Kotivuori --- docs/storage/driver/s3/s3.go | 6 ------ 1 file changed, 6 deletions(-)

diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 552c221d..46dbcd7f 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -219,12 +219,6 @@ func New(params DriverParameters) (*Driver, error) { } } - // Validate that the given credentials have at least read permissions in the - // given bucket scope. - if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { - return nil, err - } - // TODO Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new s3driver while another one is running on the same bucket. // multis, _, err := bucket.ListMulti("", "")

From df9758ba39bb732bc9f3e85f16485e336f37cb6c Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 6 Aug 2015 10:34:35 -0700 Subject: [PATCH 291/501] Add a read-only mode as a configuration option

Add "readonly" under the storage/maintenance section. When this is set to true, uploads and deletions will return 503 Service Unavailable errors. Document the parameter and add some unit testing.

Signed-off-by: Aaron Lehmann --- docs/api/v2/errors.go | 10 ++++++++ docs/handlers/api_test.go | 48 +++++++++++++++++++++++++++++++++++++ docs/handlers/app.go | 18 ++++++++++---- docs/handlers/blob.go | 2 +- docs/handlers/blobupload.go | 8 +++---- docs/handlers/helpers.go | 14 +++++++++++ docs/handlers/images.go | 4 ++-- 7 files changed, 92 insertions(+), 12 deletions(-)

diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index ece52a2c..97cb03e2 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -133,4 +133,14 @@ var ( longer proceed.`, HTTPStatusCode: http.StatusNotFound, }) + + // ErrorCodeMaintenanceMode is returned when an upload can't be + // accepted because the registry is in maintenance mode.
+ ErrorCodeMaintenanceMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ + Value: "MAINTENANCE_MODE", + Message: "registry in maintenance mode", + Description: `The upload cannot be accepted because the registry + is running read-only in maintenance mode.`, + HTTPStatusCode: http.StatusServiceUnavailable, + }) ) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 52a74a2b..e85ae434 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -633,6 +633,54 @@ func TestDeleteDisabled(t *testing.T) { checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) } +func TestDeleteReadOnly(t *testing.T) { + env := newTestEnv(t, true) + + imageName := "foo/bar" + // "build" our layer file + layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + layerDigest := digest.Digest(tarSumStr) + layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + if err != nil { + t.Fatalf("Error building blob URL") + } + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) + + env.app.readOnly = true + + resp, err := httpDelete(layerURL) + if err != nil { + t.Fatalf("unexpected error deleting layer: %v", err) + } + + checkResponse(t, "deleting layer in read-only mode", resp, http.StatusServiceUnavailable) +} + +func TestStartPushReadOnly(t *testing.T) { + env := newTestEnv(t, true) + env.app.readOnly = true + + imageName := "foo/bar" + + layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "starting push in read-only mode", resp, http.StatusServiceUnavailable) +} + func httpDelete(url string) (*http.Response, error) { req, err := http.NewRequest("DELETE", url, nil) if err != nil { diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 5103c5fb..d851714a 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -64,6 +64,9 @@ type App struct { // true if this registry is configured as a pull through cache isCache bool + + // true if the registry is in a read-only maintenance mode + readOnly bool } // NewApp takes a configuration and returns a configured app, ready to serve @@ -99,13 +102,18 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap purgeConfig := uploadPurgeDefaultConfig() if mc, ok := configuration.Storage["maintenance"]; ok { - for k, v := range mc { - switch k { - case "uploadpurging": - purgeConfig = v.(map[interface{}]interface{}) + if v, ok := mc["uploadpurging"]; ok { + purgeConfig, ok = v.(map[interface{}]interface{}) + if !ok { + panic("uploadpurging config key must contain additional keys") + } + } + if v, ok := mc["readonly"]; ok { + app.readOnly, ok = v.(bool) + if !ok { + panic("readonly config key must have a boolean value") } } - } startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index 4a923aa5..69c39841 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -35,7 +35,7 @@ func blobDispatcher(ctx *Context, r *http.Request) http.Handler { return handlers.MethodHandler{ "GET": http.HandlerFunc(blobHandler.GetBlob), "HEAD": 
http.HandlerFunc(blobHandler.GetBlob), - "DELETE": http.HandlerFunc(blobHandler.DeleteBlob), + "DELETE": mutableHandler(blobHandler.DeleteBlob, ctx), } } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index bbb70b59..198a8f67 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -23,12 +23,12 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } handler := http.Handler(handlers.MethodHandler{ - "POST": http.HandlerFunc(buh.StartBlobUpload), + "POST": mutableHandler(buh.StartBlobUpload, ctx), "GET": http.HandlerFunc(buh.GetUploadStatus), "HEAD": http.HandlerFunc(buh.GetUploadStatus), - "PATCH": http.HandlerFunc(buh.PatchBlobData), - "PUT": http.HandlerFunc(buh.PutBlobUploadComplete), - "DELETE": http.HandlerFunc(buh.CancelBlobUpload), + "PATCH": mutableHandler(buh.PatchBlobData, ctx), + "PUT": mutableHandler(buh.PutBlobUploadComplete, ctx), + "DELETE": mutableHandler(buh.CancelBlobUpload, ctx), }) if buh.UUID != "" { diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index 5a3c9984..9b462a19 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -7,6 +7,7 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" ) // closeResources closes all the provided resources after running the target @@ -60,3 +61,16 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr return nil } + +// mutableHandler wraps a http.HandlerFunc with a check that the registry is +// not in read-only mode. If it is in read-only mode, the wrapper returns +// v2.ErrorCodeMaintenanceMode to the client. +func mutableHandler(handler http.HandlerFunc, ctx *Context) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if ctx.App.readOnly { + ctx.Errors = append(ctx.Errors, v2.ErrorCodeMaintenanceMode) + return + } + handler(w, r) + } +} diff --git a/docs/handlers/images.go b/docs/handlers/images.go index e1931730..78e36a13 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -34,8 +34,8 @@ func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { return handlers.MethodHandler{ "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), - "PUT": http.HandlerFunc(imageManifestHandler.PutImageManifest), - "DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest), + "PUT": mutableHandler(imageManifestHandler.PutImageManifest, ctx), + "DELETE": mutableHandler(imageManifestHandler.DeleteImageManifest, ctx), } } From cbf83ecd316fa16c6452fbe3601674bfca18b04a Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 6 Aug 2015 18:02:43 -0700 Subject: [PATCH 292/501] Add an "enabled" parameter under "readonly", and make it as if the mutable handlers don't exist when read-only mode is enabled Signed-off-by: Aaron Lehmann --- docs/api/v2/errors.go | 10 ---------- docs/handlers/api_test.go | 4 ++-- docs/handlers/app.go | 10 ++++++++-- docs/handlers/blob.go | 13 +++++++++---- docs/handlers/blobupload.go | 21 ++++++++++++--------- docs/handlers/helpers.go | 14 -------------- docs/handlers/images.go | 13 +++++++++---- 7 files changed, 40 insertions(+), 45 deletions(-) diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index 97cb03e2..ece52a2c 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -133,14 +133,4 @@ var ( longer proceed.`, HTTPStatusCode: http.StatusNotFound, }) - - // ErrorCodeMaintenanceMode is returned 
when an upload can't be - // accepted because the registry is in maintenance mode. - ErrorCodeMaintenanceMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MAINTENANCE_MODE", - Message: "registry in maintenance mode", - Description: `The upload cannot be accepted because the registry - is running read-only in maintenance mode.`, - HTTPStatusCode: http.StatusServiceUnavailable, - }) ) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index e85ae434..0a0b264b 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -658,7 +658,7 @@ func TestDeleteReadOnly(t *testing.T) { t.Fatalf("unexpected error deleting layer: %v", err) } - checkResponse(t, "deleting layer in read-only mode", resp, http.StatusServiceUnavailable) + checkResponse(t, "deleting layer in read-only mode", resp, http.StatusMethodNotAllowed) } func TestStartPushReadOnly(t *testing.T) { @@ -678,7 +678,7 @@ func TestStartPushReadOnly(t *testing.T) { } defer resp.Body.Close() - checkResponse(t, "starting push in read-only mode", resp, http.StatusServiceUnavailable) + checkResponse(t, "starting push in read-only mode", resp, http.StatusMethodNotAllowed) } func httpDelete(url string) (*http.Response, error) { diff --git a/docs/handlers/app.go b/docs/handlers/app.go index d851714a..b11dc5b6 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -109,9 +109,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } } if v, ok := mc["readonly"]; ok { - app.readOnly, ok = v.(bool) + readOnly, ok := v.(map[interface{}]interface{}) if !ok { - panic("readonly config key must have a boolean value") + panic("readonly config key must contain additional keys") + } + if readOnlyEnabled, ok := readOnly["enabled"]; ok { + app.readOnly, ok = readOnlyEnabled.(bool) + if !ok { + panic("readonly's enabled config key must have a boolean value") + } } } } diff --git a/docs/handlers/blob.go b/docs/handlers/blob.go index 69c39841..fb250acd 100644 --- a/docs/handlers/blob.go +++ b/docs/handlers/blob.go @@ -32,11 +32,16 @@ func blobDispatcher(ctx *Context, r *http.Request) http.Handler { Digest: dgst, } - return handlers.MethodHandler{ - "GET": http.HandlerFunc(blobHandler.GetBlob), - "HEAD": http.HandlerFunc(blobHandler.GetBlob), - "DELETE": mutableHandler(blobHandler.DeleteBlob, ctx), + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(blobHandler.GetBlob), + "HEAD": http.HandlerFunc(blobHandler.GetBlob), } + + if !ctx.readOnly { + mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) + } + + return mhandler } // blobHandler serves http blob requests. 
diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 198a8f67..1bd33d33 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -22,14 +22,17 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { UUID: getUploadUUID(ctx), } - handler := http.Handler(handlers.MethodHandler{ - "POST": mutableHandler(buh.StartBlobUpload, ctx), - "GET": http.HandlerFunc(buh.GetUploadStatus), - "HEAD": http.HandlerFunc(buh.GetUploadStatus), - "PATCH": mutableHandler(buh.PatchBlobData, ctx), - "PUT": mutableHandler(buh.PutBlobUploadComplete, ctx), - "DELETE": mutableHandler(buh.CancelBlobUpload, ctx), - }) + handler := handlers.MethodHandler{ + "GET": http.HandlerFunc(buh.GetUploadStatus), + "HEAD": http.HandlerFunc(buh.GetUploadStatus), + } + + if !ctx.readOnly { + handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) + handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) + handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) + handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) + } if buh.UUID != "" { state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) @@ -93,7 +96,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } } - handler = closeResources(handler, buh.Upload) + return closeResources(handler, buh.Upload) } return handler diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index 9b462a19..5a3c9984 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -7,7 +7,6 @@ import ( ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" ) // closeResources closes all the provided resources after running the target @@ -61,16 +60,3 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr return nil } - -// mutableHandler wraps a http.HandlerFunc with a check that the registry is -// not in read-only mode. If it is in read-only mode, the wrapper returns -// v2.ErrorCodeMaintenanceMode to the client. -func mutableHandler(handler http.HandlerFunc, ctx *Context) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if ctx.App.readOnly { - ctx.Errors = append(ctx.Errors, v2.ErrorCodeMaintenanceMode) - return - } - handler(w, r) - } -} diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 78e36a13..0aeeb6f0 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -32,11 +32,16 @@ func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { imageManifestHandler.Digest = dgst } - return handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), - "PUT": mutableHandler(imageManifestHandler.PutImageManifest, ctx), - "DELETE": mutableHandler(imageManifestHandler.DeleteImageManifest, ctx), + mhandler := handlers.MethodHandler{ + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), } + + if !ctx.readOnly { + mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) + mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) + } + + return mhandler } // imageManifestHandler handles http operations on image manifests. 
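Taken together, these two read-only patches leave the registry configurable roughly like this (a minimal sketch; the filesystem driver and its path are placeholders, while the maintenance/uploadpurging/readonly keys come from the code above):

    version: 0.1
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
      maintenance:
        uploadpurging:
          enabled: false
        readonly:
          enabled: true

With readonly enabled the mutating routes are simply never registered, so gorilla's handlers.MethodHandler answers PUT, POST, PATCH and DELETE with 405 Method Not Allowed, which is what the updated tests assert.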
From b045aa2a3d408638ad22589dc7a4e919df074765 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 24 Sep 2015 18:22:23 -0700 Subject: [PATCH 293/501] Avoid importing "testing" in externally-facing code The "testing" package adds some flags in its init function, so utilities that import distribution code may print a page of extra testing flags in their help output. This commit solves the issue by moving an import of "testing" in the registry/storage/cache package to a new registry/storage/cache/cachecheck package, which is only imported by tests. Signed-off-by: Aaron Lehmann --- docs/storage/cache/{ => cachecheck}/suite.go | 11 ++++++----- docs/storage/cache/memory/memory_test.go | 4 ++-- docs/storage/cache/redis/redis_test.go | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) rename docs/storage/cache/{ => cachecheck}/suite.go (93%) diff --git a/docs/storage/cache/suite.go b/docs/storage/cache/cachecheck/suite.go similarity index 93% rename from docs/storage/cache/suite.go rename to docs/storage/cache/cachecheck/suite.go index b5a2f643..ed0f95fd 100644 --- a/docs/storage/cache/suite.go +++ b/docs/storage/cache/cachecheck/suite.go @@ -1,4 +1,4 @@ -package cache +package cachecheck import ( "testing" @@ -6,19 +6,20 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/storage/cache" ) // CheckBlobDescriptorCache takes a cache implementation through a common set // of operations. If adding new tests, please add them here so new // implementations get the benefit. This should be used for unit tests. -func CheckBlobDescriptorCache(t *testing.T, provider BlobDescriptorCacheProvider) { +func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { ctx := context.Background() checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) checkBlobDescriptorCacheSetAndRead(t, ctx, provider) } -func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { +func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty store: %v", err) } @@ -56,7 +57,7 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, } } -func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { +func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:abc") expected := distribution.Descriptor{ Digest: "sha256:abc", @@ -140,7 +141,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi } } -func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) { +func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:abc") expected := distribution.Descriptor{ Digest: "sha256:abc", diff --git a/docs/storage/cache/memory/memory_test.go b/docs/storage/cache/memory/memory_test.go index 3bae7ccb..49c2b5c3 100644 --- a/docs/storage/cache/memory/memory_test.go +++ b/docs/storage/cache/memory/memory_test.go @@ -3,11 +3,11 @@ package memory import ( "testing" - 
"github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/cachecheck" ) // TestInMemoryBlobInfoCache checks the in memory implementation is working // correctly. func TestInMemoryBlobInfoCache(t *testing.T) { - cache.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) + cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) } diff --git a/docs/storage/cache/redis/redis_test.go b/docs/storage/cache/redis/redis_test.go index ed6944a1..81bcaddd 100644 --- a/docs/storage/cache/redis/redis_test.go +++ b/docs/storage/cache/redis/redis_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/cachecheck" "github.com/garyburd/redigo/redis" ) @@ -47,5 +47,5 @@ func TestRedisBlobDescriptorCacheProvider(t *testing.T) { t.Fatalf("unexpected error flushing redis db: %v", err) } - cache.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) + cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } From fa4c33f5f3b02f95869ae374015387a08284b8b8 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 28 Sep 2015 10:41:18 -0700 Subject: [PATCH 294/501] [api spec] Update authN and authZ errors Associate HTTP 401s with Authentication errors rather than Authorization errors. Changes the meaning of the UNAUTHORIZED error to be authentication specific. Defines DENIED error code to be associated with authorization errors which result in HTTP 403 responses. Add 'No Such Repository' errors to more endpoints. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/api/errcode/register.go | 22 +++- docs/api/v2/descriptors.go | 237 ++++++++++++----------------------- 2 files changed, 100 insertions(+), 159 deletions(-) diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go index f3062ffa..01c34384 100644 --- a/docs/api/errcode/register.go +++ b/docs/api/errcode/register.go @@ -33,16 +33,28 @@ var ( HTTPStatusCode: http.StatusMethodNotAllowed, }) - // ErrorCodeUnauthorized is returned if a request is not authorized. + // ErrorCodeUnauthorized is returned if a request requires + // authentication. ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ Value: "UNAUTHORIZED", - Message: "access to the requested resource is not authorized", - Description: `The access controller denied access for the operation on - a resource. Often this will be accompanied by a 401 Unauthorized - response status.`, + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, HTTPStatusCode: http.StatusUnauthorized, }) + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + // ErrorCodeUnavailable provides a common error to report unavialability // of a service or endpoint. 
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index c5630fed..c8270308 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -111,45 +111,67 @@ var ( }, } - unauthorizedResponse = ResponseDescriptor{ - Description: "The client does not have access to the repository.", + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", Headers: []ParameterDescriptor{ authChallengeHeader, { Name: "Content-Length", Type: "integer", - Description: "Length of the JSON error response body.", + Description: "Length of the JSON response body.", Format: "", }, }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", - Format: unauthorizedErrorsBody, + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, }, } - unauthorizedResponsePush = ResponseDescriptor{ - Description: "The client does not have access to push to the repository.", - StatusCode: http.StatusUnauthorized, + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", Headers: []ParameterDescriptor{ - authChallengeHeader, { Name: "Content-Length", Type: "integer", - Description: "Length of the JSON error response body.", + Description: "Length of the JSON response body.", Format: "", }, }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", - Format: unauthorizedErrorsBody, + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, }, } ) @@ -345,7 +367,7 @@ var routeDescriptors = []RouteDescriptor{ Name: RouteNameBase, Path: "/v2/", Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization.`, + Description: `Base V2 API route. 
Typically, this can be used for lightweight version checks and to validate registry authentication.`, Methods: []MethodDescriptor{ { Method: "GET", @@ -363,24 +385,11 @@ var routeDescriptors = []RouteDescriptor{ }, }, Failures: []ResponseDescriptor{ - { - Description: "The client is not authorized to access the registry.", - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - }, { Description: "The registry does not implement the V2 API.", StatusCode: http.StatusNotFound, }, + unauthorizedResponseDescriptor, }, }, }, @@ -432,28 +441,9 @@ var routeDescriptors = []RouteDescriptor{ }, }, Failures: []ResponseDescriptor{ - { - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -487,28 +477,9 @@ var routeDescriptors = []RouteDescriptor{ }, }, Failures: []ResponseDescriptor{ - { - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -560,29 +531,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have access to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - }, - { - Description: "The named manifest is not known to the registry.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -637,17 +588,9 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, }, - { - StatusCode: http.StatusUnauthorized, - Description: "The client does not have permission to push to the repository.", - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - }, + unauthorizedResponseDescriptor, + 
repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", @@ -670,25 +613,6 @@ var routeDescriptors = []RouteDescriptor{ }`, }, }, - { - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, { Name: "Not allowed", Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", @@ -733,25 +657,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - { - StatusCode: http.StatusUnauthorized, - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON error response body.", - Format: "", - }, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, { Name: "Unknown Manifest", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", @@ -845,7 +753,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, @@ -858,6 +765,9 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -914,7 +824,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ @@ -930,6 +839,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -993,6 +905,9 @@ var routeDescriptors = []RouteDescriptor{ errcode.ErrorCodeUnsupported, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1066,7 +981,6 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeNameInvalid, }, }, - unauthorizedResponsePush, { Name: "Not allowed", Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", @@ -1075,6 +989,9 @@ var routeDescriptors = []RouteDescriptor{ errcode.ErrorCodeUnsupported, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -1118,7 +1035,9 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeNameInvalid, }, }, - unauthorizedResponsePush, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1177,7 +1096,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1189,6 +1107,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1249,7 +1170,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponsePush, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1261,6 +1181,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, { @@ -1328,7 +1251,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponsePush, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1344,6 +1266,9 @@ var routeDescriptors = []RouteDescriptor{ Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1420,7 +1345,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponsePush, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, @@ -1432,6 +1356,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, @@ -1474,7 +1401,6 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, - unauthorizedResponse, { Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, @@ -1486,6 +1412,9 @@ var routeDescriptors = []RouteDescriptor{ Format: errorsBody, }, }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, }, }, }, From 6573d5c119d81e68adc49b865bd9dc39445ca369 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Fri, 18 Sep 2015 11:03:15 -0700 Subject: [PATCH 295/501] Add http.host parameter This allows the administrator to specify an externally-reachable URL for the registry. It takes precedence over the X-Forwarded-Proto and X-Forwarded-Host headers, and the hostname in the request. Signed-off-by: Aaron Lehmann --- docs/api/v2/urls_test.go | 40 ++++++++++++++++++++++++++++++++++------ docs/handlers/app.go | 27 ++++++++++++++++++++++++--- 2 files changed, 58 insertions(+), 9 deletions(-) diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index 1113a7dd..61d41547 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -158,8 +158,9 @@ func TestBuilderFromRequest(t *testing.T) { forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") testRequests := []struct { - request *http.Request - base string + request *http.Request + base string + configHost url.URL }{ { request: &http.Request{URL: u, Host: u.Host}, @@ -177,10 +178,23 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, base: "http://first.example.com", }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, + base: "https://third.example.com:5000", + configHost: url.URL{ + Scheme: "https", + Host: "third.example.com:5000", + }, + }, } for _, tr := range testRequests { - builder := NewURLBuilderFromRequest(tr.request) + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost) + } else { + builder = NewURLBuilderFromRequest(tr.request) + } for _, testCase := range makeURLBuilderTestCases(builder) { url, err := testCase.build() @@ -207,8 +221,9 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { forwardedProtoHeader.Set("X-Forwarded-Proto", "https") testRequests := []struct { - request *http.Request - base string + request *http.Request + base string + configHost url.URL }{ { request: &http.Request{URL: u, Host: u.Host}, @@ -218,10 +233,23 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com/prefix/", }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://subdomain.example.com/prefix/", + configHost: url.URL{ + Scheme: "https", + Host: "subdomain.example.com/prefix", + }, + }, } for _, tr := range testRequests { - builder := NewURLBuilderFromRequest(tr.request) + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost) + } else { + builder = NewURLBuilderFromRequest(tr.request) + } for _, testCase := range makeURLBuilderTestCases(builder) { url, err := testCase.build() diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 5103c5fb..f2f6ad9d 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -7,6 +7,7 @@ import ( "math/rand" "net" "net/http" + "net/url" "os" "time" @@ -54,6 +55,10 @@ type App struct { registry distribution.Namespace // registry is the primary registry 
backend for the app instance. accessController auth.AccessController // main access controller for application + // httpHost is a parsed representation of the http.host parameter from + // the configuration. Only the Scheme and Host fields are used. + httpHost url.URL + // events contains notification related configuration. events struct { sink notifications.Sink @@ -120,6 +125,14 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap app.configureRedis(configuration) app.configureLogHook(configuration) + if configuration.HTTP.Host != "" { + u, err := url.Parse(configuration.HTTP.Host) + if err != nil { + panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) + } + app.httpHost = *u + } + options := []storage.RegistryOption{} if app.isCache { @@ -639,9 +652,17 @@ func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { "vars.uuid")) context := &Context{ - App: app, - Context: ctx, - urlBuilder: v2.NewURLBuilderFromRequest(r), + App: app, + Context: ctx, + } + + if app.httpHost.Scheme != "" && app.httpHost.Host != "" { + // A "host" item in the configuration takes precedence over + // X-Forwarded-Proto and X-Forwarded-Host headers, and the + // hostname in the request. + context.urlBuilder = v2.NewURLBuilder(&app.httpHost) + } else { + context.urlBuilder = v2.NewURLBuilderFromRequest(r) } return context From d5ca577ad1fbc81ff10704336cc0de2167e6c8b2 Mon Sep 17 00:00:00 2001 From: Matt Moore Date: Wed, 7 Oct 2015 16:06:53 -0700 Subject: [PATCH 296/501] Allow hostname components in component names. Fixes https://github.com/docker/distribution/issues/1062 This relaxes the naming restrictions places on Docker images to permit valid hostnames according to [RFC-2396](https://www.ietf.org/rfc/rfc2396.txt). It deviates from the RFC in the following ways: 1) Allow underscores where we allow hyphens (hostnames don't allow underscores, which we must for backwards compatibility). 2) Leave "top-level" name segments unrestricted (domains require an alpha character to begin a top-level domain, e.g. "com"). 3) DO NOT allow a trailing dot, as permitted by FQDNs. Signed-off-by: Matt Moore --- docs/api/v2/names.go | 19 ++++++++++++++--- docs/api/v2/names_test.go | 45 ++++++++++++++++++++++++++++++--------- 2 files changed, 51 insertions(+), 13 deletions(-) diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go index 14b7ea60..5f340793 100644 --- a/docs/api/v2/names.go +++ b/docs/api/v2/names.go @@ -15,10 +15,23 @@ const ( RepositoryNameTotalLengthMax = 255 ) +// domainLabelRegexp represents the following RFC-2396 BNF construct: +// domainlabel = alphanum | alphanum *( alphanum | "-" ) alphanum +var domainLabelRegexp = regexp.MustCompile(`[a-z0-9](?:-*[a-z0-9])*`) + // RepositoryNameComponentRegexp restricts registry path component names to -// start with at least one letter or number, with following parts able to -// be separated by one period, dash or underscore. -var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) +// the allow valid hostnames according to: https://www.ietf.org/rfc/rfc2396.txt +// with the following differences: +// 1) It DOES NOT allow for fully-qualified domain names, which include a +// trailing '.', e.g. "google.com." +// 2) It DOES NOT restrict 'top-level' domain labels to start with just alpha +// characters. +// 3) It DOES allow for underscores to appear in the same situations as dots. +// +// RFC-2396 uses the BNF construct: +// hostname = *( domainlabel "." ) toplabel [ "." 
] +var RepositoryNameComponentRegexp = regexp.MustCompile( domainLabelRegexp.String() + `(?:[._]` + domainLabelRegexp.String() + `)*`) // RepositoryNameComponentAnchoredRegexp is the version of // RepositoryNameComponentRegexp which must completely match the content

diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go index 656ae846..f4daf2e7 100644 --- a/docs/api/v2/names_test.go +++ b/docs/api/v2/names_test.go @@ -164,22 +164,47 @@ var ( err: ErrRepositoryNameComponentInvalid, invalid: true, }, + { + input: "do__cker/docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "docker./docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: ".docker/docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "do..cker/docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "docker-/docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, + { + input: "-docker/docker", + err: ErrRepositoryNameComponentInvalid, + invalid: true, + }, { input: "b.gcr.io/test.example.com/my-app", // embedded domain component }, - // TODO(stevvooe): The following is a punycode domain name that we may - // want to allow in the future. Currently, this is not allowed but we - // may want to change this in the future. Adding this here as invalid - // for the time being. { - input: "xn--n3h.com/myimage", // http://☃.com in punycode - err: ErrRepositoryNameComponentInvalid, - invalid: true, + input: "xn--n3h.com/myimage", // http://☃.com in punycode }, { - input: "xn--7o8h.com/myimage", // http://🐳.com in punycode - err: ErrRepositoryNameComponentInvalid, - invalid: true, + input: "xn--7o8h.com/myimage", // http://🐳.com in punycode + }, + { + input: "registry.io/foo/project--id.module--name.ver---sion--name", // image with hostname }, } )

From 36fa22c82157c2ed148712cf7200bb24697167ca Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Fri, 18 Sep 2015 16:11:35 -0700 Subject: [PATCH 297/501] Fix a race condition in pull through cache population by removing the functionality of readers joining current downloads. Concurrent requests for the same blob will not block, but only the first instance will be committed locally.
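Condensed to its core, the new scheme is an "inflight map" guard. A sketch with illustrative names, not the exact code in this patch:

    import "sync"

    var (
        mu       sync.Mutex
        inflight = make(map[string]struct{}) // keyed by blob digest
    )

    // serve streams one blob to one client; only the first concurrent
    // request for a digest also populates the local store.
    func serve(dgst string, fromRemote, storeLocal func() error) error {
        mu.Lock()
        if _, busy := inflight[dgst]; busy {
            mu.Unlock()
            return fromRemote() // later arrivals never block or double-commit
        }
        inflight[dgst] = struct{}{}
        mu.Unlock()

        go func() {
            // the first arrival commits to local storage in the background
            defer func() { mu.Lock(); delete(inflight, dgst); mu.Unlock() }()
            _ = storeLocal() // the real code logs this error
        }()

        return fromRemote()
    }

The trade-off is deliberate: concurrent readers may each pull the blob from the remote, spending extra bandwidth, but no reader ever waits on another and no BlobWriter is shared between goroutines.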
Signed-off-by: Richard Scothern --- docs/proxy/proxyblobstore.go | 206 ++++++++++++--------------- docs/proxy/proxyblobstore_test.go | 227 ++++++++++++++++++++++-------- docs/proxy/proxyregistry.go | 2 +- 3 files changed, 260 insertions(+), 175 deletions(-) diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index b480a111..976dc8d7 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -22,15 +22,10 @@ type proxyBlobStore struct { scheduler *scheduler.TTLExpirationScheduler } -var _ distribution.BlobStore = proxyBlobStore{} - -type inflightBlob struct { - refCount int - bw distribution.BlobWriter -} +var _ distribution.BlobStore = &proxyBlobStore{} // inflight tracks currently downloading blobs -var inflight = make(map[digest.Digest]*inflightBlob) +var inflight = make(map[digest.Digest]struct{}) // mu protects inflight var mu sync.Mutex @@ -42,140 +37,113 @@ func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, d w.Header().Set("Etag", digest.String()) } -func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - desc, err := pbs.localStore.Stat(ctx, dgst) - if err != nil && err != distribution.ErrBlobUnknown { - return err - } - - if err == nil { - proxyMetrics.BlobPush(uint64(desc.Size)) - return pbs.localStore.ServeBlob(ctx, w, r, dgst) - } - - desc, err = pbs.remoteStore.Stat(ctx, dgst) +func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { + desc, err := pbs.remoteStore.Stat(ctx, dgst) if err != nil { - return err + return distribution.Descriptor{}, err + } + + if w, ok := writer.(http.ResponseWriter); ok { + setResponseHeaders(w, desc.Size, desc.MediaType, dgst) } remoteReader, err := pbs.remoteStore.Open(ctx, dgst) if err != nil { - return err + return distribution.Descriptor{}, err } - bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc) + _, err = io.CopyN(writer, remoteReader, desc.Size) + if err != nil { + return distribution.Descriptor{}, err + } + + proxyMetrics.BlobPush(uint64(desc.Size)) + + return desc, nil +} + +func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) { + localDesc, err := pbs.localStore.Stat(ctx, dgst) + if err != nil { + // Stat can report a zero sized file here if it's checked between creation + // and population. 
Return nil error, and continue + return false, nil + } + + if err == nil { + proxyMetrics.BlobPush(uint64(localDesc.Size)) + return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) + } + + return false, nil + +} + +func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { + defer func() { + mu.Lock() + delete(inflight, dgst) + mu.Unlock() + }() + + var desc distribution.Descriptor + var err error + var bw distribution.BlobWriter + + bw, err = pbs.localStore.Create(ctx) if err != nil { return err } - defer cleanup() - if isNew { - go func() { - err := streamToStorage(ctx, remoteReader, desc, bw) - if err != nil { - context.GetLogger(ctx).Error(err) - } + desc, err = pbs.copyContent(ctx, dgst, bw) + if err != nil { + return err + } - proxyMetrics.BlobPull(uint64(desc.Size)) - }() - err := streamToClient(ctx, w, desc, bw) - if err != nil { - return err - } + _, err = bw.Commit(ctx, desc) + if err != nil { + return err + } - proxyMetrics.BlobPush(uint64(desc.Size)) - pbs.scheduler.AddBlob(dgst.String(), blobTTL) + return nil +} + +func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + served, err := pbs.serveLocal(ctx, w, r, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) + return err + } + + if served { return nil } - err = streamToClient(ctx, w, desc, bw) - if err != nil { - return err - } - proxyMetrics.BlobPush(uint64(desc.Size)) - return nil -} - -type cleanupFunc func() - -// getOrCreateBlobWriter will track which blobs are currently being downloaded and enable client requesting -// the same blob concurrently to read from the existing stream. -func getOrCreateBlobWriter(ctx context.Context, blobs distribution.BlobService, desc distribution.Descriptor) (distribution.BlobWriter, bool, cleanupFunc, error) { mu.Lock() - defer mu.Unlock() - dgst := desc.Digest - - cleanup := func() { - mu.Lock() - defer mu.Unlock() - inflight[dgst].refCount-- - - if inflight[dgst].refCount == 0 { - defer delete(inflight, dgst) - _, err := inflight[dgst].bw.Commit(ctx, desc) - if err != nil { - // There is a narrow race here where Commit can be called while this blob's TTL is expiring - // and its being removed from storage. 
In that case, the client stream will continue - // uninterruped and the blob will be pulled through on the next request, so just log it - context.GetLogger(ctx).Errorf("Error committing blob: %q", err) - } - - } - } - - var bw distribution.BlobWriter _, ok := inflight[dgst] if ok { - bw = inflight[dgst].bw - inflight[dgst].refCount++ - return bw, false, cleanup, nil + mu.Unlock() + _, err := pbs.copyContent(ctx, dgst, w) + return err } + inflight[dgst] = struct{}{} + mu.Unlock() - var err error - bw, err = blobs.Create(ctx) - if err != nil { - return nil, false, nil, err - } + go func(dgst digest.Digest) { + if err := pbs.storeLocal(ctx, dgst); err != nil { + context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) + } + pbs.scheduler.AddBlob(dgst.String(), repositoryTTL) + }(dgst) - inflight[dgst] = &inflightBlob{refCount: 1, bw: bw} - return bw, true, cleanup, nil -} - -func streamToStorage(ctx context.Context, remoteReader distribution.ReadSeekCloser, desc distribution.Descriptor, bw distribution.BlobWriter) error { - _, err := io.CopyN(bw, remoteReader, desc.Size) + _, err = pbs.copyContent(ctx, dgst, w) if err != nil { return err } - return nil } -func streamToClient(ctx context.Context, w http.ResponseWriter, desc distribution.Descriptor, bw distribution.BlobWriter) error { - setResponseHeaders(w, desc.Size, desc.MediaType, desc.Digest) - - reader, err := bw.Reader() - if err != nil { - return err - } - defer reader.Close() - teeReader := io.TeeReader(reader, w) - buf := make([]byte, 32768, 32786) - var soFar int64 - for { - rd, err := teeReader.Read(buf) - if err == nil || err == io.EOF { - soFar += int64(rd) - if soFar < desc.Size { - // buffer underflow, keep trying - continue - } - return nil - } - return err - } -} - -func (pbs proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { desc, err := pbs.localStore.Stat(ctx, dgst) if err == nil { return desc, err @@ -189,26 +157,26 @@ func (pbs proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distrib } // Unsupported functions -func (pbs proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { +func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { return distribution.Descriptor{}, distribution.ErrUnsupported } -func (pbs proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (pbs *proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { return nil, distribution.ErrUnsupported } -func (pbs proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { +func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { return nil, distribution.ErrUnsupported } -func (pbs proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { +func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { return nil, distribution.ErrUnsupported } -func (pbs proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { return nil, distribution.ErrUnsupported } -func (pbs proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { +func (pbs 
*proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index f8845ed3..a88fd8b3 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -1,10 +1,13 @@ package proxy import ( - "fmt" + "io/ioutil" + "math/rand" "net/http" "net/http/httptest" + "sync" "testing" + "time" "github.com/docker/distribution" "github.com/docker/distribution/context" @@ -12,75 +15,119 @@ import ( "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" + "github.com/docker/distribution/registry/storage/driver/filesystem" "github.com/docker/distribution/registry/storage/driver/inmemory" ) +var sbsMu sync.Mutex + type statsBlobStore struct { stats map[string]int blobs distribution.BlobStore } func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + sbsMu.Lock() sbs.stats["put"]++ + sbsMu.Unlock() + return sbs.blobs.Put(ctx, mediaType, p) } func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + sbsMu.Lock() sbs.stats["get"]++ + sbsMu.Unlock() + return sbs.blobs.Get(ctx, dgst) } func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { + sbsMu.Lock() sbs.stats["create"]++ + sbsMu.Unlock() + return sbs.blobs.Create(ctx) } func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + sbsMu.Lock() sbs.stats["resume"]++ + sbsMu.Unlock() + return sbs.blobs.Resume(ctx, id) } func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + sbsMu.Lock() sbs.stats["open"]++ + sbsMu.Unlock() + return sbs.blobs.Open(ctx, dgst) } func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + sbsMu.Lock() sbs.stats["serveblob"]++ + sbsMu.Unlock() + return sbs.blobs.ServeBlob(ctx, w, r, dgst) } func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + + sbsMu.Lock() sbs.stats["stat"]++ + sbsMu.Unlock() + return sbs.blobs.Stat(ctx, dgst) } func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { + sbsMu.Lock() sbs.stats["delete"]++ + sbsMu.Unlock() + return sbs.blobs.Delete(ctx, dgst) } type testEnv struct { - inRemote []distribution.Descriptor - store proxyBlobStore - ctx context.Context + numUnique int + inRemote []distribution.Descriptor + store proxyBlobStore + ctx context.Context } -func (te testEnv) LocalStats() *map[string]int { +func (te *testEnv) LocalStats() *map[string]int { + sbsMu.Lock() ls := te.store.localStore.(statsBlobStore).stats + sbsMu.Unlock() return &ls } -func (te testEnv) RemoteStats() *map[string]int { +func (te *testEnv) RemoteStats() *map[string]int { + sbsMu.Lock() rs := te.store.remoteStore.(statsBlobStore).stats + sbsMu.Unlock() return &rs } // Populate remote store and record the digests -func makeTestEnv(t *testing.T, name string) testEnv { +func makeTestEnv(t *testing.T, name string) *testEnv { ctx := context.Background() - localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + truthDir, err := ioutil.TempDir("", "truth") + 
if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + cacheDir, err := ioutil.TempDir("", "cache") + if err != nil { + t.Fatalf("unable to create tempdir: %s", err) + } + + // todo: create a tempfile area here + localRegistry, err := storage.NewRegistry(ctx, filesystem.New(truthDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -89,7 +136,7 @@ func makeTestEnv(t *testing.T, name string) testEnv { t.Fatalf("unexpected error getting repo: %v", err) } - truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + truthRegistry, err := storage.NewRegistry(ctx, filesystem.New(cacheDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -116,33 +163,59 @@ func makeTestEnv(t *testing.T, name string) testEnv { scheduler: s, } - te := testEnv{ + te := &testEnv{ store: proxyBlobStore, ctx: ctx, } return te } -func populate(t *testing.T, te *testEnv, blobCount int) { - var inRemote []distribution.Descriptor - for i := 0; i < blobCount; i++ { - bytes := []byte(fmt.Sprintf("blob%d", i)) +func makeBlob(size int) []byte { + blob := make([]byte, size, size) + for i := 0; i < size; i++ { + blob[i] = byte('A' + rand.Int()%48) + } + return blob +} - desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) - if err != nil { - t.Errorf("Put in store") +func init() { + rand.Seed(42) +} + +func perm(m []distribution.Descriptor) []distribution.Descriptor { + for i := 0; i < len(m); i++ { + j := rand.Intn(i + 1) + tmp := m[i] + m[i] = m[j] + m[j] = tmp + } + return m +} + +func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { + var inRemote []distribution.Descriptor + + for i := 0; i < numUnique; i++ { + bytes := makeBlob(size) + for j := 0; j < blobCount/numUnique; j++ { + desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) + if err != nil { + t.Fatalf("Put in store") + } + + inRemote = append(inRemote, desc) } - inRemote = append(inRemote, desc) } te.inRemote = inRemote - + te.numUnique = numUnique } func TestProxyStoreStat(t *testing.T) { te := makeTestEnv(t, "foo/bar") + remoteBlobCount := 1 - populate(t, &te, remoteBlobCount) + populate(t, te, remoteBlobCount, 10, 1) localStats := te.LocalStats() remoteStats := te.RemoteStats() @@ -164,43 +237,91 @@ func TestProxyStoreStat(t *testing.T) { } } -func TestProxyStoreServe(t *testing.T) { +func TestProxyStoreServeHighConcurrency(t *testing.T) { te := makeTestEnv(t, "foo/bar") - remoteBlobCount := 1 - populate(t, &te, remoteBlobCount) + blobSize := 200 + blobCount := 10 + numUnique := 1 + populate(t, te, blobCount, blobSize, numUnique) + numClients := 16 + testProxyStoreServe(t, te, numClients) +} + +func TestProxyStoreServeMany(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + blobSize := 200 + blobCount := 10 + numUnique := 4 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// todo(richardscothern): blobCount must be smaller than num clients +func TestProxyStoreServeBig(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + blobSize := 2 << 20 + blobCount := 4 + numUnique := 2 + populate(t, te, blobCount, blobSize, numUnique) + + numClients := 4 + testProxyStoreServe(t, te, numClients) +} + +// 
testProxyStoreServe will create clients to consume all blobs +// populated in the truth store +func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { localStats := te.LocalStats() remoteStats := te.RemoteStats() - // Serveblob - pulls through blobs - for _, dr := range te.inRemote { - w := httptest.NewRecorder() - r, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } + var wg sync.WaitGroup - err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) - if err != nil { - t.Fatalf(err.Error()) - } + for i := 0; i < numClients; i++ { + // Serveblob - pulls through blobs + wg.Add(1) + go func() { + defer wg.Done() + for _, remoteBlob := range te.inRemote { + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } - dl, err := digest.FromBytes(w.Body.Bytes()) - if err != nil { - t.Fatalf("Error making digest from blob") - } - if dl != dr.Digest { - t.Errorf("Mismatching blob fetch from proxy") - } + err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) + if err != nil { + t.Fatalf(err.Error()) + } + + bodyBytes := w.Body.Bytes() + localDigest, err := digest.FromBytes(bodyBytes) + if err != nil { + t.Fatalf("Error making digest from blob") + } + if localDigest != remoteBlob.Digest { + t.Fatalf("Mismatching blob fetch from proxy") + } + } + }() } - if (*localStats)["stat"] != remoteBlobCount && (*localStats)["create"] != remoteBlobCount { - t.Fatalf("unexpected local stats") - } - if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { - t.Fatalf("unexpected local stats") + wg.Wait() + + remoteBlobCount := len(te.inRemote) + if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique { + t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount) } + // Wait for any async storage goroutines to finish + time.Sleep(3 * time.Second) + + remoteStatCount := (*remoteStats)["stat"] + remoteOpenCount := (*remoteStats)["open"] + // Serveblob - blobs come from local for _, dr := range te.inRemote { w := httptest.NewRecorder() @@ -223,15 +344,11 @@ func TestProxyStoreServe(t *testing.T) { } } - // Stat to find local, but no new blobs were created - if (*localStats)["stat"] != remoteBlobCount*2 && (*localStats)["create"] != remoteBlobCount*2 { - t.Fatalf("unexpected local stats") - } + localStats = te.LocalStats() + remoteStats = te.RemoteStats() - // Remote unchanged - if (*remoteStats)["stat"] != remoteBlobCount && (*remoteStats)["open"] != remoteBlobCount { - fmt.Printf("\tlocal=%#v, \n\tremote=%#v\n", localStats, remoteStats) - t.Fatalf("unexpected local stats") + // Ensure remote unchanged + if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount { + t.Fatalf("unexpected remote stats: %#v", remoteStats) } - } diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index e9dec2f7..8a5f5ef6 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -94,7 +94,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distri } return &proxiedRepository{ - blobStore: proxyBlobStore{ + blobStore: &proxyBlobStore{ localStore: localRepo.Blobs(ctx), remoteStore: remoteRepo.Blobs(ctx), scheduler: pr.scheduler, From b72f1fd2e3a596cfa5e14d5c8c87290dd6907faf Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 10 Jul 2015 14:36:04 -0400 Subject: [PATCH 298/501] Add a new reference package abstracting repositories, tags and digests There seems to be 
a need for a type that represents a way of pointing to an image, irrespective of the implementation. This patch defines a Reference interface and provides 3 implementations: - TagReference: when only a tag is provided - DigestReference: when a digest (according to the digest package) is provided, can include optional tag as well Validation of references are purely syntactic. There is also a strong type for tags, analogous to digests, as well as a strong type for Repository from which clients can access the hostname alone, or the repository name without the hostname, or both together via the String() method. For Repository, the files names.go and names_test.go were moved from the v2 package. Signed-off-by: Tibor Vass --- docs/api/v2/descriptors.go | 15 +- docs/api/v2/names.go | 96 ----------- docs/api/v2/names_test.go | 256 ---------------------------- docs/client/repository.go | 9 +- docs/storage/cache/memory/memory.go | 10 +- docs/storage/cache/redis/redis.go | 8 +- docs/storage/registry.go | 12 +- 7 files changed, 28 insertions(+), 378 deletions(-) delete mode 100644 docs/api/v2/names.go delete mode 100644 docs/api/v2/names_test.go diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index c5630fed..ef37997a 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -5,6 +5,7 @@ import ( "regexp" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" ) @@ -12,7 +13,7 @@ var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", Type: "string", - Format: RepositoryNameRegexp.String(), + Format: reference.RepositoryNameRegexp.String(), Required: true, Description: `Name of the target repository.`, } @@ -20,7 +21,7 @@ var ( referenceParameterDescriptor = ParameterDescriptor{ Name: "reference", Type: "string", - Format: TagNameRegexp.String(), + Format: reference.TagRegexp.String(), Required: true, Description: `Tag or digest of the target manifest.`, } @@ -389,7 +390,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameTags, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list", + Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/tags/list", Entity: "Tags", Description: "Retrieve information about tags.", Methods: []MethodDescriptor{ @@ -517,7 +518,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameManifest, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ @@ -782,7 +783,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlob, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", Description: "Operations on blobs identified by `name` and `digest`. 
Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ @@ -1006,7 +1007,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUpload, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", + Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/blobs/uploads/", Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ @@ -1128,7 +1129,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ diff --git a/docs/api/v2/names.go b/docs/api/v2/names.go deleted file mode 100644 index 5f340793..00000000 --- a/docs/api/v2/names.go +++ /dev/null @@ -1,96 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" -) - -// TODO(stevvooe): Move these definitions to the future "reference" package. -// While they are used with v2 definitions, their relevance expands beyond. - -const ( - // RepositoryNameTotalLengthMax is the maximum total number of characters in - // a repository name - RepositoryNameTotalLengthMax = 255 -) - -// domainLabelRegexp represents the following RFC-2396 BNF construct: -// domainlabel = alphanum | alphanum *( alphanum | "-" ) alphanum -var domainLabelRegexp = regexp.MustCompile(`[a-z0-9](?:-*[a-z0-9])*`) - -// RepositoryNameComponentRegexp restricts registry path component names to -// the allow valid hostnames according to: https://www.ietf.org/rfc/rfc2396.txt -// with the following differences: -// 1) It DOES NOT allow for fully-qualified domain names, which include a -// trailing '.', e.g. "google.com." -// 2) It DOES NOT restrict 'top-level' domain labels to start with just alpha -// characters. -// 3) It DOES allow for underscores to appear in the same situations as dots. -// -// RFC-2396 uses the BNF construct: -// hostname = *( domainlabel "." ) toplabel [ "." ] -var RepositoryNameComponentRegexp = regexp.MustCompile( - domainLabelRegexp.String() + `(?:[._]` + domainLabelRegexp.String() + `)*`) - -// RepositoryNameComponentAnchoredRegexp is the version of -// RepositoryNameComponentRegexp which must completely match the content -var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) - -// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow -// multiple path components, separated by a forward slash. -var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String()) - -// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. -var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) - -// TagNameAnchoredRegexp matches valid tag names, anchored at the start and -// end of the matched string. 
-var TagNameAnchoredRegexp = regexp.MustCompile("^" + TagNameRegexp.String() + "$") - -var ( - // ErrRepositoryNameEmpty is returned for empty, invalid repository names. - ErrRepositoryNameEmpty = fmt.Errorf("repository name must have at least one component") - - // ErrRepositoryNameLong is returned when a repository name is longer than - // RepositoryNameTotalLengthMax - ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) - - // ErrRepositoryNameComponentInvalid is returned when a repository name does - // not match RepositoryNameComponentRegexp - ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) -) - -// ValidateRepositoryName ensures the repository name is valid for use in the -// registry. This function accepts a superset of what might be accepted by -// docker core or docker hub. If the name does not pass validation, an error, -// describing the conditions, is returned. -// -// Effectively, the name should comply with the following grammar: -// -// alpha-numeric := /[a-z0-9]+/ -// separator := /[._-]/ -// component := alpha-numeric [separator alpha-numeric]* -// namespace := component ['/' component]* -// -// The result of the production, known as the "namespace", should be limited -// to 255 characters. -func ValidateRepositoryName(name string) error { - if name == "" { - return ErrRepositoryNameEmpty - } - - if len(name) > RepositoryNameTotalLengthMax { - return ErrRepositoryNameLong - } - - components := strings.Split(name, "/") - - for _, component := range components { - if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { - return ErrRepositoryNameComponentInvalid - } - } - - return nil -} diff --git a/docs/api/v2/names_test.go b/docs/api/v2/names_test.go deleted file mode 100644 index f4daf2e7..00000000 --- a/docs/api/v2/names_test.go +++ /dev/null @@ -1,256 +0,0 @@ -package v2 - -import ( - "strconv" - "strings" - "testing" -) - -var ( - // regexpTestcases is a unified set of testcases for - // TestValidateRepositoryName and TestRepositoryNameRegexp. - // Some of them are valid inputs for one and not the other. - regexpTestcases = []struct { - // input is the repository name or name component testcase - input string - // err is the error expected from ValidateRepositoryName, or nil - err error - // invalid should be true if the testcase is *not* expected to - // match RepositoryNameRegexp - invalid bool - }{ - { - input: "", - err: ErrRepositoryNameEmpty, - }, - { - input: "short", - }, - { - input: "simple/name", - }, - { - input: "library/ubuntu", - }, - { - input: "docker/stevvooe/app", - }, - { - input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - }, - { - input: "aa/aa/bb/bb/bb", - }, - { - input: "a/a/a/b/b", - }, - { - input: "a/a/a/a/", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "a//a/a", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "a", - }, - { - input: "a/aa", - }, - { - input: "aa/a", - }, - { - input: "a/aa/a", - }, - { - input: "foo.com/", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - // TODO: this testcase should be valid once we switch to - // the reference package. 
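// (Note on the TODO above: names that carry a registry host and port, such
// as the "foo.com:8080/bar" input below, do become valid once validation
// moves to the reference package; the next patch in this series adds a
// route testcase for exactly that shape, "locahost:8080/foo/bar/baz".)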
- input: "foo.com:8080/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo.com/bar", - }, - { - input: "foo.com/bar/baz", - }, - { - input: "foo.com/bar/baz/quux", - }, - { - input: "blog.foo.com/bar/baz", - }, - { - input: "asdf", - }, - { - input: "asdf$$^/aa", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "aa-a/aa", - }, - { - input: "aa/aa", - }, - { - input: "a-a/a-a", - }, - { - input: "a-/a/a/a", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: strings.Repeat("a", 255), - }, - { - input: strings.Repeat("a", 256), - err: ErrRepositoryNameLong, - }, - { - input: "-foo/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo/bar-", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo-/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo/-bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "_foo/bar", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "foo/bar_", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "____/____", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "_docker/_docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "docker_/docker_", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "do__cker/docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "docker./docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: ".docker/docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "do..cker/docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "docker-/docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "-docker/docker", - err: ErrRepositoryNameComponentInvalid, - invalid: true, - }, - { - input: "b.gcr.io/test.example.com/my-app", // embedded domain component - }, - { - input: "xn--n3h.com/myimage", // http://☃.com in punycode - }, - { - input: "xn--7o8h.com/myimage", // http://🐳.com in punycode - }, - { - input: "registry.io/foo/project--id.module--name.ver---sion--name", // image with hostname - }, - } -) - -// TestValidateRepositoryName tests the ValidateRepositoryName function, -// which uses RepositoryNameComponentAnchoredRegexp for validation -func TestValidateRepositoryName(t *testing.T) { - for _, testcase := range regexpTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) - t.Fail() - } - - if err := ValidateRepositoryName(testcase.input); err != testcase.err { - if testcase.err != nil { - if err != nil { - failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) - } else { - failf("expected invalid repository: %v", testcase.err) - } - } else { - if err != nil { - // Wrong error returned. - failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) - } else { - failf("unexpected error validating repository name: %v", err) - } - } - } - } -} - -func TestRepositoryNameRegexp(t *testing.T) { - for _, testcase := range regexpTestcases { - failf := func(format string, v ...interface{}) { - t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
- t.Fail() - } - - matches := RepositoryNameRegexp.FindString(testcase.input) == testcase.input - if matches == testcase.invalid { - if testcase.invalid { - failf("expected invalid repository name %s", testcase.input) - } else { - failf("expected valid repository name %s", testcase.input) - } - } - } -} diff --git a/docs/client/repository.go b/docs/client/repository.go index 1e189438..db45a464 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -15,6 +15,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" @@ -96,9 +97,9 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri return numFilled, returnErr } -// NewRepository creates a new Repository for the given repository name and base URL -func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if err := v2.ValidateRepositoryName(name); err != nil { +// NewRepository creates a new Repository for the given canonical repository name and base URL. +func NewRepository(ctx context.Context, canonicalName, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + if _, err := reference.NewRepository(canonicalName); err != nil { return nil, err } @@ -115,7 +116,7 @@ func NewRepository(ctx context.Context, name, baseURL string, transport http.Rou return &repository{ client: client, ub: ub, - name: name, + name: canonicalName, context: ctx, }, nil } diff --git a/docs/storage/cache/memory/memory.go b/docs/storage/cache/memory/memory.go index 120a6572..725a68e7 100644 --- a/docs/storage/cache/memory/memory.go +++ b/docs/storage/cache/memory/memory.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" ) @@ -25,8 +25,8 @@ func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider } } -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if err := v2.ValidateRepositoryName(repo); err != nil { +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(canonicalName string) (distribution.BlobDescriptorService, error) { + if _, err := reference.NewRepository(canonicalName); err != nil { return nil, err } @@ -34,9 +34,9 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) defer imbdcp.mu.RUnlock() return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, + repo: canonicalName, parent: imbdcp, - repository: imbdcp.repositories[repo], + repository: imbdcp.repositories[canonicalName], }, nil } diff --git a/docs/storage/cache/redis/redis.go b/docs/storage/cache/redis/redis.go index 36370bdd..54138f3d 100644 --- a/docs/storage/cache/redis/redis.go +++ b/docs/storage/cache/redis/redis.go @@ -6,7 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" 
"github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" ) @@ -40,13 +40,13 @@ func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorC } // RepositoryScoped returns the scoped cache. -func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if err := v2.ValidateRepositoryName(repo); err != nil { +func (rbds *redisBlobDescriptorService) RepositoryScoped(canonicalName string) (distribution.BlobDescriptorService, error) { + if _, err := reference.NewRepository(canonicalName); err != nil { return nil, err } return &repositoryScopedRedisBlobDescriptorService{ - repo: repo, + repo: canonicalName, upstream: rbds, }, nil } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 0b38ea9b..e3b132c5 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -3,7 +3,7 @@ package storage import ( "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -107,10 +107,10 @@ func (reg *registry) Scope() distribution.Scope { // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, name string) (distribution.Repository, error) { - if err := v2.ValidateRepositoryName(name); err != nil { +func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { + if _, err := reference.NewRepository(canonicalName); err != nil { return nil, distribution.ErrRepositoryNameInvalid{ - Name: name, + Name: canonicalName, Reason: err, } } @@ -118,7 +118,7 @@ func (reg *registry) Repository(ctx context.Context, name string) (distribution. var descriptorCache distribution.BlobDescriptorService if reg.blobDescriptorCacheProvider != nil { var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(name) + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) if err != nil { return nil, err } @@ -127,7 +127,7 @@ func (reg *registry) Repository(ctx context.Context, name string) (distribution. 
return &repository{ ctx: ctx, registry: reg, - name: name, + name: canonicalName, descriptorCache: descriptorCache, }, nil } From 582a0661bf62ef49e2911d6b8ce6d7e6e68e1cf8 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 8 Sep 2015 16:00:48 -0700 Subject: [PATCH 299/501] Update to provide small and clear interfaces Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/api/v2/descriptors.go | 12 ++++++------ docs/api/v2/routes_test.go | 8 ++++++++ docs/client/repository.go | 8 ++++---- docs/storage/cache/memory/memory.go | 8 ++++---- docs/storage/cache/redis/redis.go | 6 +++--- docs/storage/registry.go | 2 +- 6 files changed, 26 insertions(+), 18 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index ef37997a..9cfb2fb5 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -13,7 +13,7 @@ var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", Type: "string", - Format: reference.RepositoryNameRegexp.String(), + Format: reference.NameRegexp.String(), Required: true, Description: `Name of the target repository.`, } @@ -390,7 +390,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameTags, - Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/tags/list", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", Entity: "Tags", Description: "Retrieve information about tags.", Methods: []MethodDescriptor{ @@ -518,7 +518,7 @@ var routeDescriptors = []RouteDescriptor{ }, { Name: RouteNameManifest, - Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ @@ -783,7 +783,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlob, - Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ @@ -1007,7 +1007,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/blobs/uploads/", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ @@ -1129,7 +1129,7 @@ var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. 
The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index b8d724df..f6379977 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -170,6 +170,14 @@ func TestRouter(t *testing.T) { "name": "foo/bar/manifests", }, }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", + Vars: map[string]string{ + "name": "locahost:8080/foo/bar/baz", + "reference": "tag", + }, + }, } checkTestRouter(t, testCases, "", true) diff --git a/docs/client/repository.go b/docs/client/repository.go index db45a464..fc709ded 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -97,9 +97,9 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri return numFilled, returnErr } -// NewRepository creates a new Repository for the given canonical repository name and base URL. -func NewRepository(ctx context.Context, canonicalName, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if _, err := reference.NewRepository(canonicalName); err != nil { +// NewRepository creates a new Repository for the given repository name and base URL. +func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + if _, err := reference.ParseNamed(name); err != nil { return nil, err } @@ -116,7 +116,7 @@ func NewRepository(ctx context.Context, canonicalName, baseURL string, transport return &repository{ client: client, ub: ub, - name: canonicalName, + name: name, context: ctx, }, nil } diff --git a/docs/storage/cache/memory/memory.go b/docs/storage/cache/memory/memory.go index 725a68e7..68a68f08 100644 --- a/docs/storage/cache/memory/memory.go +++ b/docs/storage/cache/memory/memory.go @@ -25,8 +25,8 @@ func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider } } -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(canonicalName string) (distribution.BlobDescriptorService, error) { - if _, err := reference.NewRepository(canonicalName); err != nil { +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { return nil, err } @@ -34,9 +34,9 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(canonicalNam defer imbdcp.mu.RUnlock() return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: canonicalName, + repo: repo, parent: imbdcp, - repository: imbdcp.repositories[canonicalName], + repository: imbdcp.repositories[repo], }, nil } diff --git a/docs/storage/cache/redis/redis.go b/docs/storage/cache/redis/redis.go index 54138f3d..1736756e 100644 --- a/docs/storage/cache/redis/redis.go +++ b/docs/storage/cache/redis/redis.go @@ -40,13 +40,13 @@ func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorC } // RepositoryScoped returns the scoped cache. 
-func (rbds *redisBlobDescriptorService) RepositoryScoped(canonicalName string) (distribution.BlobDescriptorService, error) { - if _, err := reference.NewRepository(canonicalName); err != nil { +func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { return nil, err } return &repositoryScopedRedisBlobDescriptorService{ - repo: canonicalName, + repo: repo, upstream: rbds, }, nil } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index e3b132c5..1050920a 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -108,7 +108,7 @@ func (reg *registry) Scope() distribution.Scope { // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { - if _, err := reference.NewRepository(canonicalName); err != nil { + if _, err := reference.ParseNamed(canonicalName); err != nil { return nil, distribution.ErrRepositoryNameInvalid{ Name: canonicalName, Reason: err, From 26762a54fe39f8872a5ffffb6ffac319c268ef07 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 14 Sep 2015 21:12:33 -0700 Subject: [PATCH 300/501] Correct unmarshal order for SignedManifest To ensure that we only unmarshal the verified payload into the contained manifest, we first copy the entire incoming buffer into Raw and then unmarshal only the Payload portion of the incoming bytes. If the contents is later verified, the caller can then be sure that the contents of the Manifest fields can be trusted. Signed-off-by: Aaron Lehmann --- docs/client/repository_test.go | 63 +++++++++++++++++++--------------- docs/handlers/api_test.go | 40 ++++++++++++++++----- docs/handlers/images.go | 2 +- 3 files changed, 68 insertions(+), 37 deletions(-) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 6e4a017e..1e6eb25f 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -3,7 +3,6 @@ package client import ( "bytes" "crypto/rand" - "encoding/json" "fmt" "io" "log" @@ -14,8 +13,6 @@ import ( "testing" "time" - "github.com/docker/distribution/uuid" - "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" @@ -23,6 +20,8 @@ import ( "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/testutil" + "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" ) func testServer(rrm testutil.RequestResponseMap) (string, func()) { @@ -420,7 +419,7 @@ func TestBlobUploadMonolithic(t *testing.T) { } } -func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest) { +func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { blobs := make([]schema1.FSLayer, blobCount) history := make([]schema1.History, blobCount) @@ -431,30 +430,38 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} } - m := &schema1.SignedManifest{ - Manifest: schema1.Manifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, + m := schema1.Manifest{ + Name: name, 
+ Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: manifest.Versioned{ + SchemaVersion: 1, }, } - manifestBytes, err := json.Marshal(m) - if err != nil { - panic(err) - } - dgst, err := digest.FromBytes(manifestBytes) + + pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { panic(err) } - m.Raw = manifestBytes + sm, err := schema1.Sign(&m, pk) + if err != nil { + panic(err) + } - return m, dgst + p, err := sm.Payload() + if err != nil { + panic(err) + } + + dgst, err := digest.FromBytes(p) + if err != nil { + panic(err) + } + + return sm, dgst, p } func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { @@ -551,7 +558,7 @@ func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { func TestManifestFetch(t *testing.T) { ctx := context.Background() repo := "test.example.com/repo" - m1, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifest(repo, dgst.String(), m1.Raw, &m) @@ -586,9 +593,9 @@ func TestManifestFetch(t *testing.T) { func TestManifestFetchWithEtag(t *testing.T) { repo := "test.example.com/repo/by/tag" - m1, d1 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addTestManifestWithEtag(repo, "latest", m1.Raw, &m, d1.String()) + addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) e, c := testServer(m) defer c() @@ -611,8 +618,8 @@ func TestManifestFetchWithEtag(t *testing.T) { func TestManifestDelete(t *testing.T) { repo := "test.example.com/repo/delete" - _, dgst1 := newRandomSchemaV1Manifest(repo, "latest", 6) - _, dgst2 := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -651,7 +658,7 @@ func TestManifestDelete(t *testing.T) { func TestManifestPut(t *testing.T) { repo := "test.example.com/repo/delete" - m1, dgst := newRandomSchemaV1Manifest(repo, "other", 6) + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -744,7 +751,7 @@ func TestManifestTags(t *testing.T) { func TestManifestUnauthorized(t *testing.T) { repo := "test.example.com/repo" - _, dgst := newRandomSchemaV1Manifest(repo, "latest", 6) + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 52a74a2b..adc7647d 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -760,14 +760,32 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) defer resp.Body.Close() - checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, - v2.ErrorCodeManifestUnverified, v2.ErrorCodeBlobUnknown, v2.ErrorCodeDigestInvalid) + checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, 
v2.ErrorCodeManifestInvalid) expectedCounts := map[errcode.ErrorCode]int{ - v2.ErrorCodeManifestUnverified: 1, - v2.ErrorCodeBlobUnknown: 2, - v2.ErrorCodeDigestInvalid: 2, + v2.ErrorCodeManifestInvalid: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // sign the manifest and still get some interesting errors. + sm, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp = putManifest(t, "putting signed manifest with errors", manifestURL, sm) + defer resp.Body.Close() + checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, + v2.ErrorCodeManifestBlobUnknown, v2.ErrorCodeDigestInvalid) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + v2.ErrorCodeDigestInvalid: 2, } if !reflect.DeepEqual(counts, expectedCounts) { @@ -1426,7 +1444,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { } // Manifest upload - unsignedManifest := &schema1.Manifest{ + m := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, @@ -1434,7 +1452,13 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { Tag: tag, FSLayers: []schema1.FSLayer{}, } - resp := putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + + sm, err := schema1.Sign(m, env.pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + resp := putManifest(t, "putting unsigned manifest", manifestURL, sm) checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Manifest Delete diff --git a/docs/handlers/images.go b/docs/handlers/images.go index e1931730..deb9cf49 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -163,7 +163,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http for _, verificationError := range err { switch verificationError := verificationError.(type) { case distribution.ErrManifestBlobUnknown: - imh.Errors = append(imh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(verificationError.Digest)) + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) case distribution.ErrManifestUnverified: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) default: From c40b2e2341565e0e1afd7ae36d096f981a6fd4f6 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Thu, 15 Oct 2015 22:21:14 +0800 Subject: [PATCH 301/501] Redundant digest verification in validateBlob Change-Id: I03764edadae529db2cc3acf7ecca329570f18659 Signed-off-by: Li Yi --- docs/storage/registry.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 1050920a..5ef06d53 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -175,7 +175,8 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M // TODO(stevvooe): linkPath limits this blob store to only // manifests. This instance cannot be used for blob checks. 
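// (The two hunks below thread the repository's resumableDigestEnabled flag
// into both of its linked blob stores. Previously the flag was never
// propagated, so blob validation always recomputed digests from scratch,
// which appears to be the redundant verification the commit title refers
// to.)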
- linkPathFns: manifestLinkPathFns, + linkPathFns: manifestLinkPathFns, + resumableDigestEnabled: repo.resumableDigestEnabled, }, }, tagStore: &tagStore{ @@ -219,8 +220,9 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { // TODO(stevvooe): linkPath limits this blob store to only layers. // This instance cannot be used for manifest checks. - linkPathFns: []linkPathFunc{blobLinkPath}, - deleteEnabled: repo.registry.deleteEnabled, + linkPathFns: []linkPathFunc{blobLinkPath}, + deleteEnabled: repo.registry.deleteEnabled, + resumableDigestEnabled: repo.resumableDigestEnabled, } } From 84595fc628757b055892313d545f11fa02015565 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 14 Oct 2015 17:22:52 -0700 Subject: [PATCH 302/501] Simplify proxy scheduler The proxy scheduler implemented its own timer state machine. It's simpler and more efficient to leverage the Go runtime's timer heap by using time.AfterFunc. This commit adds a time.Timer to each scheduler entry, and starts and stops those timers as necessary. Then the mainloop goroutine and its associated logic are not needed. Signed-off-by: Aaron Lehmann --- docs/proxy/scheduler/scheduler.go | 181 +++++++++++-------------- docs/proxy/scheduler/scheduler_test.go | 18 ++- 2 files changed, 84 insertions(+), 115 deletions(-) diff --git a/docs/proxy/scheduler/scheduler.go b/docs/proxy/scheduler/scheduler.go index 056b148a..6af777cc 100644 --- a/docs/proxy/scheduler/scheduler.go +++ b/docs/proxy/scheduler/scheduler.go @@ -3,13 +3,14 @@ package scheduler import ( "encoding/json" "fmt" + "sync" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" ) -// onTTLExpiryFunc is called when a repositories' TTL expires +// onTTLExpiryFunc is called when a repository's TTL expires type expiryFunc func(string) error const ( @@ -23,14 +24,14 @@ type schedulerEntry struct { Key string `json:"Key"` Expiry time.Time `json:"ExpiryData"` EntryType int `json:"EntryType"` + + timer *time.Timer } // New returns a new instance of the scheduler func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { return &TTLExpirationScheduler{ - entries: make(map[string]schedulerEntry), - addChan: make(chan schedulerEntry), - stopChan: make(chan bool), + entries: make(map[string]*schedulerEntry), driver: driver, pathToStateFile: path, ctx: ctx, @@ -41,9 +42,9 @@ func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpi // TTLExpirationScheduler is a scheduler used to perform actions // when TTLs expire type TTLExpirationScheduler struct { - entries map[string]schedulerEntry - addChan chan schedulerEntry - stopChan chan bool + sync.Mutex + + entries map[string]*schedulerEntry driver driver.StorageDriver ctx context.Context @@ -55,24 +56,27 @@ type TTLExpirationScheduler struct { onManifestExpire expiryFunc } -// addChan allows more TTLs to be pushed to the scheduler -type addChan chan schedulerEntry - -// stopChan allows the scheduler to be stopped - used for testing. 
-type stopChan chan bool - // OnBlobExpire is called when a scheduled blob's TTL expires func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + ttles.onBlobExpire = f } // OnManifestExpire is called when a scheduled manifest's TTL expires func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { + ttles.Lock() + defer ttles.Unlock() + ttles.onManifestExpire = f } // AddBlob schedules a blob cleanup after ttl expires func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + if ttles.stopped { return fmt.Errorf("scheduler not started") } @@ -82,6 +86,9 @@ func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) err // AddManifest schedules a manifest cleanup after ttl expires func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { + ttles.Lock() + defer ttles.Unlock() + if ttles.stopped { return fmt.Errorf("scheduler not started") } @@ -92,23 +99,9 @@ func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Durat // Start starts the scheduler func (ttles *TTLExpirationScheduler) Start() error { - return ttles.start() -} + ttles.Lock() + defer ttles.Unlock() -func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { - entry := schedulerEntry{ - Key: key, - Expiry: time.Now().Add(ttl), - EntryType: eType, - } - ttles.addChan <- entry -} - -func (ttles *TTLExpirationScheduler) stop() { - ttles.stopChan <- true -} - -func (ttles *TTLExpirationScheduler) start() error { err := ttles.readState() if err != nil { return err @@ -120,97 +113,75 @@ func (ttles *TTLExpirationScheduler) start() error { context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") ttles.stopped = false - go ttles.mainloop() + + // Start timer for each deserialized entry + for _, entry := range ttles.entries { + entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) + } return nil } -// mainloop uses a select statement to listen for events. Most of its time -// is spent in waiting on a TTL to expire but can be interrupted when TTLs -// are added. 
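// (The mainloop below is what this patch deletes; each schedulerEntry now
// owns a timer instead. The core of the replacement, reduced to a sketch in
// which expire is a hypothetical stand-in for the onBlobExpire /
// onManifestExpire dispatch:
//
//	entry.timer = time.AfterFunc(ttl, func() {
//		ttles.Lock()
//		defer ttles.Unlock()
//		expire(entry.Key)
//		delete(ttles.entries, entry.Key)
//	})
//
// time.AfterFunc registers the callback on the Go runtime's timer heap and
// invokes it in its own goroutine, so neither a dedicated scheduling
// goroutine nor the deleted O(n) next-expiry scan is needed.)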
-func (ttles *TTLExpirationScheduler) mainloop() { - for { - if ttles.stopped { - return - } +func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { + entry := &schedulerEntry{ + Key: key, + Expiry: time.Now().Add(ttl), + EntryType: eType, + } + context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + if oldEntry, present := ttles.entries[key]; present && oldEntry.timer != nil { + oldEntry.timer.Stop() + } + ttles.entries[key] = entry + entry.timer = ttles.startTimer(entry, ttl) - nextEntry, ttl := nextExpiringEntry(ttles.entries) - if len(ttles.entries) == 0 { - context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Nothing to do, sleeping...") - } else { - context.GetLogger(ttles.ctx).Infof("scheduler mainloop(): Sleeping for %s until cleanup of %s", ttl, nextEntry.Key) - } - - select { - case <-time.After(ttl): - var f expiryFunc - - switch nextEntry.EntryType { - case entryTypeBlob: - f = ttles.onBlobExpire - case entryTypeManifest: - f = ttles.onManifestExpire - default: - f = func(repoName string) error { - return fmt.Errorf("Unexpected scheduler entry type") - } - } - - if err := f(nextEntry.Key); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", nextEntry.Key, err) - } - - delete(ttles.entries, nextEntry.Key) - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - case entry := <-ttles.addChan: - context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) - ttles.entries[entry.Key] = entry - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - break - - case <-ttles.stopChan: - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } - ttles.stopped = true - } + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) } } -func nextExpiringEntry(entries map[string]schedulerEntry) (*schedulerEntry, time.Duration) { - if len(entries) == 0 { - return nil, 24 * time.Hour - } +func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { + return time.AfterFunc(ttl, func() { + ttles.Lock() + defer ttles.Unlock() - // todo:(richardscothern) this is a primitive o(n) algorithm - // but n will never be *that* big and it's all in memory. Investigate - // time.AfterFunc for heap based expiries + var f expiryFunc - first := true - var nextEntry schedulerEntry - for _, entry := range entries { - if first { - nextEntry = entry - first = false - continue + switch entry.EntryType { + case entryTypeBlob: + f = ttles.onBlobExpire + case entryTypeManifest: + f = ttles.onManifestExpire + default: + f = func(repoName string) error { + return fmt.Errorf("Unexpected scheduler entry type") + } } - if entry.Expiry.Before(nextEntry.Expiry) { - nextEntry = entry + + if err := f(entry.Key); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) } + + delete(ttles.entries, entry.Key) + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } + }) +} + +// Stop stops the scheduler. 
+func (ttles *TTLExpirationScheduler) Stop() { + ttles.Lock() + defer ttles.Unlock() + + if err := ttles.writeState(); err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) } - // Dates may be from the past if the scheduler has - // been restarted, set their ttl to 0 - if nextEntry.Expiry.Before(time.Now()) { - nextEntry.Expiry = time.Now() - return &nextEntry, 0 + for _, entry := range ttles.entries { + entry.timer.Stop() } - - return &nextEntry, nextEntry.Expiry.Sub(time.Now()) + ttles.stopped = true } func (ttles *TTLExpirationScheduler) writeState() error { diff --git a/docs/proxy/scheduler/scheduler_test.go b/docs/proxy/scheduler/scheduler_test.go index fb5479f0..00072ed2 100644 --- a/docs/proxy/scheduler/scheduler_test.go +++ b/docs/proxy/scheduler/scheduler_test.go @@ -2,7 +2,6 @@ package scheduler import ( "encoding/json" - "fmt" "testing" "time" @@ -27,13 +26,13 @@ func TestSchedule(t *testing.T) { if !ok { t.Fatalf("Trying to remove nonexistant repo: %s", repoName) } - fmt.Println("removing", repoName) + t.Log("removing", repoName) delete(remainingRepos, repoName) return nil } s.onBlobExpire = deleteFunc - err := s.start() + err := s.Start() if err != nil { t.Fatalf("Error starting ttlExpirationScheduler: %s", err) } @@ -97,7 +96,7 @@ func TestRestoreOld(t *testing.T) { } s := New(context.Background(), fs, "/ttl") s.onBlobExpire = deleteFunc - err = s.start() + err = s.Start() if err != nil { t.Fatalf("Error starting ttlExpirationScheduler: %s", err) } @@ -124,7 +123,7 @@ func TestStopRestore(t *testing.T) { s := New(context.Background(), fs, pathToStateFile) s.onBlobExpire = deleteFunc - err := s.start() + err := s.Start() if err != nil { t.Fatalf(err.Error()) } @@ -133,13 +132,13 @@ func TestStopRestore(t *testing.T) { // Start and stop before all operations complete // state will be written to fs - s.stop() + s.Stop() time.Sleep(10 * time.Millisecond) // v2 will restore state from fs s2 := New(context.Background(), fs, pathToStateFile) s2.onBlobExpire = deleteFunc - err = s2.start() + err = s2.Start() if err != nil { t.Fatalf("Error starting v2: %s", err.Error()) } @@ -153,12 +152,11 @@ func TestStopRestore(t *testing.T) { func TestDoubleStart(t *testing.T) { s := New(context.Background(), inmemory.New(), "/ttl") - err := s.start() + err := s.Start() if err != nil { t.Fatalf("Unable to start scheduler") } - fmt.Printf("%#v", s) - err = s.start() + err = s.Start() if err == nil { t.Fatalf("Scheduler started twice without error") } From fc5ee720d1f8a231d6c774f44ba7af34ee8a8c37 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 19 Oct 2015 16:42:12 -0700 Subject: [PATCH 303/501] Correct two golint comment issues Signed-off-by: Aaron Lehmann --- docs/storage/walk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/walk.go b/docs/storage/walk.go index 8290f167..3d891276 100644 --- a/docs/storage/walk.go +++ b/docs/storage/walk.go @@ -8,7 +8,7 @@ import ( storageDriver "github.com/docker/distribution/registry/storage/driver" ) -// SkipDir is used as a return value from onFileFunc to indicate that +// ErrSkipDir is used as a return value from onFileFunc to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. 
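// (golint's complaint here is that a doc comment on an exported identifier
// must begin with that identifier's name, so "SkipDir is used..." becomes
// "ErrSkipDir is used..." to match the variable below. For context, a walk
// callback prunes a directory by returning the sentinel; a sketch, with
// shouldPrune hypothetical:
//
//	func(fileInfo storageDriver.FileInfo) error {
//		if shouldPrune(fileInfo.Path()) {
//			return ErrSkipDir
//		}
//		return nil
//	}
//
// The walker swallows the sentinel rather than surfacing it as an error.)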
 var ErrSkipDir = errors.New("skip this directory")

From 8263cdeb5719f0322c580bf4215dcd35db274c17 Mon Sep 17 00:00:00 2001
From: Tianon Gravi
Date: Tue, 20 Oct 2015 06:57:15 -0700
Subject: [PATCH 304/501] Update "type auth.Challenge" comment example code

This interface was changed in 4a2300aaa92156ef6388521c2b9eabeae4e3cf08, but
the comment wasn't ever updated to match.

Signed-off-by: Andrew "Tianon" Page
---
 docs/auth/auth.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/auth/auth.go b/docs/auth/auth.go
index 862c8d28..b3bb580d 100644
--- a/docs/auth/auth.go
+++ b/docs/auth/auth.go
@@ -21,7 +21,9 @@
 //		if ctx, err := accessController.Authorized(ctx, access); err != nil {
 //			if challenge, ok := err.(auth.Challenge) {
 //				// Let the challenge write the response.
-//				challenge.ServeHTTP(w, r)
+//				challenge.SetHeaders(w)
+//				w.WriteHeader(http.StatusUnauthorized)
+//				return
 //			} else {
 //				// Some other error.
 //			}

From 9293e3db11aa72cb4ce0c330a84dbe24d3237595 Mon Sep 17 00:00:00 2001
From: Rusty Conover
Date: Sun, 25 Oct 2015 13:01:45 -0400
Subject: [PATCH 305/501] Fix failing test case

When building a URL don't include path components in the host parameter.

Closes #1124

Signed-off-by: Rusty Conover
---
 docs/api/v2/urls_test.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go
index 61d41547..fdcfc31a 100644
--- a/docs/api/v2/urls_test.go
+++ b/docs/api/v2/urls_test.go
@@ -238,7 +238,8 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) {
 			base: "https://subdomain.example.com/prefix/",
 			configHost: url.URL{
 				Scheme: "https",
-				Host:   "subdomain.example.com/prefix",
+				Host:   "subdomain.example.com",
+				Path:   "/prefix/",
 			},
 		},
 	}

From b38b98c8a8e098b20d8695833788b0df13439c47 Mon Sep 17 00:00:00 2001
From: Matt Moore
Date: Wed, 30 Sep 2015 08:47:01 -0700
Subject: [PATCH 306/501] Add `expires_in` and `issued_at` to the auth spec.

This extends the specification for the Bearer token response to include
information pertaining to when an issued Bearer token will expire.

This also allows the client to accept `access_token` as an alias for
`token`.

Signed-off-by: Matt Moore
---
 docs/client/auth/session.go      |  75 ++++++--
 docs/client/auth/session_test.go | 290 ++++++++++++++++++++++++++++++-
 2 files changed, 350 insertions(+), 15 deletions(-)

diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go
index 27a2aa71..6c92fc34 100644
--- a/docs/client/auth/session.go
+++ b/docs/client/auth/session.go
@@ -10,6 +10,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client/transport"
 )
@@ -85,11 +86,24 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
 	return nil
 }
 
+// This is the minimum duration a token can last (in seconds).
+// A token must not live less than 60 seconds because older versions
+// of the Docker client didn't read their expiration from the token
+// response and assumed 60 seconds. So to remain compatible with
+// those implementations, a token must live at least this long.
+const minimumTokenLifetimeSeconds = 60
+
+// Private interface for time used by this package to enable tests to provide their own implementation.
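// (This clock interface is the seam the new tests use: production code
// supplies realClock, defined below, while session_test.go injects a fake
// whose time the test advances by hand. Both pieces appear verbatim later
// in this patch:
//
//	type fakeClock struct {
//		current time.Time
//	}
//
//	func (fc *fakeClock) Now() time.Time { return fc.current }
//
// and, in the test loop, clock.current = clock.current.Add(timeIncrement),
// so token expiry is crossed deterministically without sleeping.)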
+type clock interface { + Now() time.Time +} + type tokenHandler struct { header http.Header creds CredentialStore scope tokenScope transport http.RoundTripper + clock clock tokenLock sync.Mutex tokenCache string @@ -108,12 +122,24 @@ func (ts tokenScope) String() string { return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) } +// An implementation of clock for providing real time data. +type realClock struct{} + +// Now implements clock +func (realClock) Now() time.Time { return time.Now() } + // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { + return newTokenHandler(transport, creds, realClock{}, scope, actions...) +} + +// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. +func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { return &tokenHandler{ transport: transport, creds: creds, + clock: c, scope: tokenScope{ Resource: "repository", Scope: scope, @@ -146,40 +172,43 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st func (th *tokenHandler) refreshToken(params map[string]string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() - now := time.Now() + now := th.clock.Now() if now.After(th.tokenExpiration) { - token, err := th.fetchToken(params) + tr, err := th.fetchToken(params) if err != nil { return err } - th.tokenCache = token - th.tokenExpiration = now.Add(time.Minute) + th.tokenCache = tr.Token + th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) } return nil } type tokenResponse struct { - Token string `json:"token"` + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` } -func (th *tokenHandler) fetchToken(params map[string]string) (token string, err error) { +func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) realm, ok := params["realm"] if !ok { - return "", errors.New("no realm specified for token auth challenge") + return nil, errors.New("no realm specified for token auth challenge") } // TODO(dmcgowan): Handle empty scheme realmURL, err := url.Parse(realm) if err != nil { - return "", fmt.Errorf("invalid token auth challenge realm: %s", err) + return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) } req, err := http.NewRequest("GET", realmURL.String(), nil) if err != nil { - return "", err + return nil, err } reqParams := req.URL.Query() @@ -206,26 +235,44 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token string, err resp, err := th.client().Do(req) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { - return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) } decoder := json.NewDecoder(resp.Body) tr := new(tokenResponse) if err = decoder.Decode(tr); err != nil { - return "", 
fmt.Errorf("unable to decode token response: %s", err) + return nil, fmt.Errorf("unable to decode token response: %s", err) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken } if tr.Token == "" { - return "", errors.New("authorization server did not include a token in the response") + return nil, errors.New("authorization server did not include a token in the response") } - return tr.Token, nil + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now() + } + + return tr, nil } type basicHandler struct { diff --git a/docs/client/auth/session_test.go b/docs/client/auth/session_test.go index 1b4754ab..f1686942 100644 --- a/docs/client/auth/session_test.go +++ b/docs/client/auth/session_test.go @@ -7,11 +7,20 @@ import ( "net/http/httptest" "net/url" "testing" + "time" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/testutil" ) +// An implementation of clock for providing fake time data. +type fakeClock struct { + current time.Time +} + +// Now implements clock +func (fc *fakeClock) Now() time.Time { return fc.current } + func testServer(rrm testutil.RequestResponseMap) (string, func()) { h := testutil.NewHandler(rrm) s := httptest.NewServer(h) @@ -210,7 +219,7 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { }, Response: testutil.Response{ StatusCode: http.StatusOK, - Body: []byte(`{"token":"statictoken"}`), + Body: []byte(`{"access_token":"statictoken"}`), }, }, }) @@ -265,6 +274,285 @@ func TestEndpointAuthorizeTokenBasic(t *testing.T) { } } +func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: 
http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + clock := &fakeClock{current: time.Now()} + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + timeIncrement := 1000 * time.Second + for i := 0; i < 4; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + +func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { + service := "localhost.localdomain" + repo := "some/fun/registry" + scope := fmt.Sprintf("repository:%s:pull,push", repo) + username := "tokenuser" + password := "superSecretPa$$word" + + // This test sets things up such that the token was issued one increment + // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. + // This will mean that the token expires after 3 increments instead of 4. 
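+	// Concretely: with issued_at = t0 and expires_in = 3001 seconds the
+	// token is valid until t0+3001s, while the requests below start at
+	// t0+1000s, so only three 1000-second increments fit before a refresh
+	// is forced.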
+ clock := &fakeClock{current: time.Now()} + timeIncrement := 1000 * time.Second + firstIssuedAt := clock.Now() + clock.current = clock.current.Add(timeIncrement) + secondIssuedAt := clock.current.Add(2 * timeIncrement) + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), + }, + }, + }) + + authenicate1 := fmt.Sprintf("Basic realm=localhost") + tokenExchanges := 0 + basicCheck := func(a string) bool { + tokenExchanges = tokenExchanges + 1 + return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) + } + te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + bearerCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate2, bearerCheck) + defer c() + + creds := &testCredentialStore{ + username: username, + password: password, + } + + challengeManager := NewSimpleChallengeManager() + _, err := ping(challengeManager, e+"/v2/", "") + if err != nil { + t.Fatal(err) + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + client := &http.Client{Transport: transport1} + + // First call should result in a token exchange + // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. + // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn + // so this loop should have one fewer iteration. 
+ for i := 0; i < 3; i++ { + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 1 { + t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) + } + clock.current = clock.current.Add(timeIncrement) + } + + // After we've exceeded the expiration, we should see a second token exchange. + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + if tokenExchanges != 2 { + t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) + } +} + func TestEndpointAuthorizeBasic(t *testing.T) { m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { From 98ad17f757f962843274a97070f4c48c6dcd5444 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Mon, 20 Jul 2015 18:45:15 +0100 Subject: [PATCH 307/501] Storage driver for: Google Cloud Storage (gcs) Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/doc.go | 3 + docs/storage/driver/gcs/gcs.go | 623 ++++++++++++++++++++++++++++ docs/storage/driver/gcs/gcs_test.go | 106 +++++ 3 files changed, 732 insertions(+) create mode 100644 docs/storage/driver/gcs/doc.go create mode 100644 docs/storage/driver/gcs/gcs.go create mode 100644 docs/storage/driver/gcs/gcs_test.go diff --git a/docs/storage/driver/gcs/doc.go b/docs/storage/driver/gcs/doc.go new file mode 100644 index 00000000..0f23ea78 --- /dev/null +++ b/docs/storage/driver/gcs/doc.go @@ -0,0 +1,3 @@ +// Package gcs implements the Google Cloud Storage driver backend. Support can be +// enabled by including the "include_gcs" build tag. +package gcs diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go new file mode 100644 index 00000000..8dc96675 --- /dev/null +++ b/docs/storage/driver/gcs/gcs.go @@ -0,0 +1,623 @@ +// Package gcs provides a storagedriver.StorageDriver implementation to +// store blobs in Google cloud storage. +// +// This package leverages the google.golang.org/cloud/storage client library +//for interfacing with gcs. +// +// Because gcs is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that gcs guarantees only eventual consistency, so do not assume +// that a successful write will mean immediate access to the data written (although +// in most regions a new object put has guaranteed read after write). The only true +// guarantee is that once you call Stat and receive a certain file size, that much of +// the file is already accessible. 
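+//
+// Support for this driver is opt-in; it is compiled into the registry by
+// passing the build tag, for example (assuming a standard Go toolchain):
+//
+//	go build -tags include_gcs ./...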
+// +// +build include_gcs + +package gcs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + + "google.golang.org/api/googleapi" + storageapi "google.golang.org/api/storage/v1" + "google.golang.org/cloud" + "google.golang.org/cloud/storage" + + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "gcs" +const dummyProjectID = "" + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type driverParameters struct { + bucket string + keyfile string + rootDirectory string +} + +func init() { + factory.Register(driverName, &gcsDriverFactory{}) +} + +// gcsDriverFactory implements the factory.StorageDriverFactory interface +type gcsDriverFactory struct{} + +// Create StorageDriver from parameters +func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// driver is a storagedriver.StorageDriver implementation backed by GCS +// Objects are stored at absolute keys in the provided bucket. +type driver struct { + client *http.Client + bucket string + email string + privateKey []byte + rootDirectory string +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - bucket +func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + keyfile, ok := parameters["keyfile"] + if !ok { + keyfile = "" + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + params := driverParameters{ + fmt.Sprint(bucket), + fmt.Sprint(keyfile), + fmt.Sprint(rootDirectory), + } + + return New(params) +} + +// New constructs a new driver +func New(params driverParameters) (storagedriver.StorageDriver, error) { + var ts oauth2.TokenSource + var err error + rootDirectory := strings.Trim(params.rootDirectory, "/") + if rootDirectory != "" { + rootDirectory += "/" + } + d := &driver{ + bucket: params.bucket, + rootDirectory: rootDirectory, + } + if params.keyfile == "" { + ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) + if err != nil { + return nil, err + } + } else { + jsonKey, err := ioutil.ReadFile(params.keyfile) + if err != nil { + return nil, err + } + conf, err := google.JWTConfigFromJSON( + jsonKey, + storage.ScopeFullControl, + ) + if err != nil { + return nil, err + } + ts = conf.TokenSource(context.Background()) + d.email = conf.Email + d.privateKey = conf.PrivateKey + } + client := oauth2.NewClient(context.Background(), ts) + d.client = client + if err != nil { + return nil, err + } + return &base.Base{ + StorageDriver: d, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. 
+func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { + rc, err := d.ReadStream(context, path, 0) + if err != nil { + return nil, err + } + defer rc.Close() + + p, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + return p, nil +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + defer wc.Close() + _, err := wc.Write(contents) + return err +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { + name := d.pathToKey(path) + + // copied from google.golang.org/cloud/storage#NewReader : + // to set the additional "Range" header + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", d.bucket, name), + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + if offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) + } + res, err := d.client.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storage.StatObject(d.context(context), d.bucket, name) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + if res.StatusCode < 200 || res.StatusCode > 299 { + res.Body.Close() + return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", d.bucket, name, res.Status) + } + return res.Body, nil +} + +// WriteStream stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. 
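+//
+// Non-zero offsets are handled with GCS object composition: the incoming
+// bytes are written to a temporary "#part-N#" object that is then composed
+// onto the existing object. Composite objects have a component-count limit,
+// so once ComponentCount reaches 1023 the object is rewritten from scratch.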
+func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + if offset == 0 { + return d.writeCompletely(context, path, 0, reader) + } + + service, err := storageapi.New(d.client) + if err != nil { + return 0, err + } + objService := storageapi.NewObjectsService(service) + var obj *storageapi.Object + err = retry(5, func() error { + o, err := objService.Get(d.bucket, d.pathToKey(path)).Do() + obj = o + return err + }) + // obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do) + if err != nil { + return 0, err + } + + // cannot append more chunks, so redo from scratch + if obj.ComponentCount >= 1023 { + return d.writeCompletely(context, path, offset, reader) + } + + // skip from reader + objSize := int64(obj.Size) + nn, err := skip(reader, objSize-offset) + if err != nil { + return nn, err + } + + // Size <= offset + partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount) + gcsContext := d.context(context) + wc := storage.NewWriter(gcsContext, d.bucket, partName) + wc.ContentType = "application/octet-stream" + + if objSize < offset { + err = writeZeros(wc, offset-objSize) + if err != nil { + wc.CloseWithError(err) + return nn, err + } + } + n, err := io.Copy(wc, reader) + if err != nil { + wc.CloseWithError(err) + return nn, err + } + err = wc.Close() + if err != nil { + return nn, err + } + // wc was closed succesfully, so the temporary part exists, schedule it for deletion at the end + // of the function + defer storage.DeleteObject(gcsContext, d.bucket, partName) + + req := &storageapi.ComposeRequest{ + Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType}, + SourceObjects: []*storageapi.ComposeRequestSourceObjects{ + { + Name: obj.Name, + Generation: obj.Generation, + }, { + Name: partName, + Generation: wc.Object().Generation, + }}, + } + + err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err }) + if err == nil { + nn = nn + n + } + + return nn, err +} + +type request func() error + +func retry(maxTries int, req request) error { + backoff := time.Second + var err error + for i := 0; i < maxTries; i++ { + err := req() + if err == nil { + return nil + } + + status := err.(*googleapi.Error) + if status == nil || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + return err + } + + time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) + if i <= 4 { + backoff = backoff * 2 + } + } + return err +} + +func (d *driver) writeCompletely(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + defer wc.Close() + + // Copy the first offset bytes of the existing contents + // (padded with zeros if needed) into the writer + if offset > 0 { + existing, err := d.ReadStream(context, path, 0) + if err != nil { + return 0, err + } + defer existing.Close() + n, err := io.CopyN(wc, existing, offset) + if err == io.EOF { + err = writeZeros(wc, offset-n) + } + if err != nil { + return 0, err + } + } + return io.Copy(wc, reader) +} + +func skip(reader io.Reader, count int64) (int64, error) { + if count <= 0 { + return 0, nil + } + return io.CopyN(ioutil.Discard, reader, count) +} + +func writeZeros(wc 
io.Writer, count int64) error { + buf := make([]byte, 32*1024) + for count > 0 { + size := cap(buf) + if int64(size) > count { + size = int(count) + } + n, err := wc.Write(buf[0:size]) + if err != nil { + return err + } + count = count - int64(n) + } + return nil +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { + var fi storagedriver.FileInfoFields + //try to get as file + gcsContext := d.context(context) + obj, err := storage.StatObject(gcsContext, d.bucket, d.pathToKey(path)) + if err == nil { + fi = storagedriver.FileInfoFields{ + Path: path, + Size: obj.Size, + ModTime: obj.Updated, + IsDir: false, + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + } + //try to get as folder + dirpath := d.pathToDirKey(path) + + var query *storage.Query + query = &storage.Query{} + query.Prefix = dirpath + query.MaxResults = 1 + + objects, err := storage.ListObjects(gcsContext, d.bucket, query) + if err != nil { + return nil, err + } + if len(objects.Results) < 1 { + return nil, storagedriver.PathNotFoundError{Path: path} + } + fi = storagedriver.FileInfoFields{ + Path: path, + IsDir: true, + } + obj = objects.Results[0] + if obj.Name == dirpath { + fi.Size = obj.Size + fi.ModTime = obj.Updated + } + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (d *driver) List(context ctx.Context, path string) ([]string, error) { + var query *storage.Query + query = &storage.Query{} + query.Delimiter = "/" + query.Prefix = d.pathToDirKey(path) + list := make([]string, 0, 64) + for { + objects, err := storage.ListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, object := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operationsCheck that the object is not deleted, + // so filter out any objects with a non-zero time-deleted + if object.Deleted.IsZero() { + name := object.Name + // Ignore objects with names that end with '#' (these are uploaded parts) + if name[len(name)-1] != '#' { + name = d.keyToPath(name) + list = append(list, name) + } + } + } + for _, subpath := range objects.Prefixes { + subpath = d.keyToPath(subpath) + list = append(list, subpath) + } + query = objects.Next + if query == nil { + break + } + } + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. 
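+//
+// GCS has no native rename: a directory move copies every key under the
+// source prefix to its destination and then deletes the originals. If any
+// copy fails, the copies made so far are deleted to avoid partial state.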
+func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { + prefix := d.pathToDirKey(sourcePath) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + destPrefix := d.pathToDirKey(destPath) + copies := make([]string, 0, len(keys)) + sort.Strings(keys) + var err error + for _, key := range keys { + dest := destPrefix + key[len(prefix):] + _, err = storage.CopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) + if err == nil { + copies = append(copies, dest) + } else { + break + } + } + // if an error occurred, attempt to cleanup the copies made + if err != nil { + for i := len(copies) - 1; i >= 0; i-- { + _ = storage.DeleteObject(gcsContext, d.bucket, copies[i]) + } + return err + } + // delete originals + for i := len(keys) - 1; i >= 0; i-- { + err2 := storage.DeleteObject(gcsContext, d.bucket, keys[i]) + if err2 != nil { + err = err2 + } + } + return err + } + _, err = storage.CopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) + if err != nil { + if status := err.(*googleapi.Error); status != nil { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + } + return err + } + return storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) +} + +// listAll recursively lists all names of objects stored at "prefix" and its subpaths. +func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { + list := make([]string, 0, 64) + query := &storage.Query{} + query.Prefix = prefix + query.Versions = false + for { + objects, err := storage.ListObjects(d.context(context), d.bucket, query) + if err != nil { + return nil, err + } + for _, obj := range objects.Results { + // GCS does not guarantee strong consistency between + // DELETE and LIST operationsCheck that the object is not deleted, + // so filter out any objects with a non-zero time-deleted + if obj.Deleted.IsZero() { + list = append(list, obj.Name) + } + } + query = objects.Next + if query == nil { + break + } + } + return list, nil +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(context ctx.Context, path string) error { + prefix := d.pathToDirKey(path) + gcsContext := d.context(context) + keys, err := d.listAll(gcsContext, prefix) + if err != nil { + return err + } + if len(keys) > 0 { + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + for _, key := range keys { + if err := storage.DeleteObject(gcsContext, d.bucket, key); err != nil { + return err + } + } + return nil + } + err = storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + if err != nil { + if status := err.(*googleapi.Error); status != nil { + if status.Code == http.StatusNotFound { + return storagedriver.PathNotFoundError{Path: path} + } + } + } + return err +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. 
+// Returns ErrUnsupportedMethod if this driver has no privateKey +func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { + if d.privateKey == nil { + return "", storagedriver.ErrUnsupportedMethod + } + + name := d.pathToKey(path) + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + opts := &storage.SignedURLOptions{ + GoogleAccessID: d.email, + PrivateKey: d.privateKey, + Method: methodString, + Expires: expiresTime, + } + return storage.SignedURL(d.bucket, name, opts) +} + +func (d *driver) context(context ctx.Context) context.Context { + return cloud.WithContext(context, dummyProjectID, d.client) +} + +func (d *driver) pathToKey(path string) string { + return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") +} + +func (d *driver) pathToDirKey(path string) string { + return d.pathToKey(path) + "/" +} + +func (d *driver) keyToPath(key string) string { + return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") +} diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go new file mode 100644 index 00000000..7afc4e70 --- /dev/null +++ b/docs/storage/driver/gcs/gcs_test.go @@ -0,0 +1,106 @@ +// +build include_gcs + +package gcs + +import ( + "io/ioutil" + "os" + "testing" + + ctx "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
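+//
+// The suite below is skipped unless GCS parameters are provided, e.g.
+// (a sketch, assuming a test bucket and service-account key you control):
+//
+//	REGISTRY_STORAGE_GCS_BUCKET=my-bucket \
+//	REGISTRY_STORAGE_GCS_KEYFILE=/path/to/key.json \
+//	go test -tags include_gcs ./docs/storage/driver/gcs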
+func Test(t *testing.T) { check.TestingT(t) } + +var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) +var skipGCS func() string + +func init() { + bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") + keyfile := os.Getenv("REGISTRY_STORAGE_GCS_KEYFILE") + credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { + + parameters := driverParameters{ + bucket, + keyfile, + rootDirectory, + } + + return New(parameters) + } + + // Skip GCS storage driver tests if environment variable parameters are not provided + skipGCS = func() string { + if bucket == "" || (credentials == "" && keyfile == "") { + return "Must set REGISTRY_STORAGE_GCS_BUCKET and (GOOGLE_APPLICATION_CREDENTIALS or REGISTRY_STORAGE_GCS_KEYFILE) to run GCS tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return gcsDriverConstructor(root) + }, skipGCS) +} + +func TestEmptyRootList(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := gcsDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := gcsDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := ctx.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} From 00f02b5fbc344e2fd11d7f0914a15d50f6194fd8 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Fri, 23 Oct 2015 15:25:42 -0700 Subject: [PATCH 308/501] Buffer writing the scheduler entry state to disk by periodically checking for changes to the entries index and saving it to the filesystem. 
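Mutations now only mark the entries index as dirty instead of writing the
state file inline; a background goroutine owns a time.Ticker firing every
indexSaveFrequency (5 seconds) and persists the index only when the dirty
flag is set, exiting when doneChan is closed by Stop().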
Signed-off-by: Richard Scothern --- docs/proxy/scheduler/scheduler.go | 45 ++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/docs/proxy/scheduler/scheduler.go b/docs/proxy/scheduler/scheduler.go index 6af777cc..e91920a1 100644 --- a/docs/proxy/scheduler/scheduler.go +++ b/docs/proxy/scheduler/scheduler.go @@ -16,6 +16,7 @@ type expiryFunc func(string) error const ( entryTypeBlob = iota entryTypeManifest + indexSaveFrequency = 5 * time.Second ) // schedulerEntry represents an entry in the scheduler @@ -36,6 +37,8 @@ func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpi pathToStateFile: path, ctx: ctx, stopped: true, + doneChan: make(chan struct{}), + saveTimer: time.NewTicker(indexSaveFrequency), } } @@ -54,6 +57,10 @@ type TTLExpirationScheduler struct { onBlobExpire expiryFunc onManifestExpire expiryFunc + + indexDirty bool + saveTimer *time.Ticker + doneChan chan struct{} } // OnBlobExpire is called when a scheduled blob's TTL expires @@ -119,6 +126,31 @@ func (ttles *TTLExpirationScheduler) Start() error { entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) } + // Start a ticker to periodically save the entries index + + go func() { + for { + select { + case <-ttles.saveTimer.C: + if !ttles.indexDirty { + continue + } + + ttles.Lock() + err := ttles.writeState() + if err != nil { + context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + } else { + ttles.indexDirty = false + } + ttles.Unlock() + + case <-ttles.doneChan: + return + } + } + }() + return nil } @@ -134,10 +166,7 @@ func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType in } ttles.entries[key] = entry entry.timer = ttles.startTimer(entry, ttl) - - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } + ttles.indexDirty = true } func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { @@ -163,9 +192,7 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time. 
} delete(ttles.entries, entry.Key) - if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) - } + ttles.indexDirty = true }) } @@ -181,6 +208,9 @@ func (ttles *TTLExpirationScheduler) Stop() { for _, entry := range ttles.entries { entry.timer.Stop() } + + close(ttles.doneChan) + ttles.saveTimer.Stop() ttles.stopped = true } @@ -194,6 +224,7 @@ func (ttles *TTLExpirationScheduler) writeState() error { if err != nil { return err } + return nil } From 854fa0a4dd7fed6812b97b94ac1b4b5f37121ac7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 2 Nov 2015 11:52:14 -0800 Subject: [PATCH 309/501] registry/storage: close filereader after allocation Signed-off-by: Stephen J Day --- docs/storage/blobwriter.go | 1 + docs/storage/blobwriter_resumable.go | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index b384fa8a..3453a57a 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -227,6 +227,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri if err != nil { return distribution.Descriptor{}, err } + defer fr.Close() tr := io.TeeReader(fr, digester.Hash()) diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go index 26d3beab..d33f544d 100644 --- a/docs/storage/blobwriter_resumable.go +++ b/docs/storage/blobwriter_resumable.go @@ -91,6 +91,7 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { if err != nil { return err } + defer fr.Close() if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) From 11546b53097bb7e01376a6a7462ed14d9e657434 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Tue, 3 Nov 2015 09:59:50 +0100 Subject: [PATCH 310/501] Add support for temporary URL for Swift driver Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 198 ++++++++++++++++++------ docs/storage/driver/swift/swift_test.go | 18 ++- 2 files changed, 170 insertions(+), 46 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index c9d623d3..3b2cdc53 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -7,9 +7,6 @@ // It supports both TempAuth authentication and Keystone authentication // (up to version 3). // -// Since Swift has no concept of directories (directories are an abstration), -// empty objects are created with the MIME type application/vnd.swift.directory. -// // As Swift has a limit on the size of a single uploaded object (by default // this is 5GB), the driver makes use of the Swift Large Object Support // (http://docs.openstack.org/developer/swift/overview_large_objects.html). 
@@ -24,12 +21,11 @@ import ( "crypto/sha1" "crypto/tls" "encoding/hex" - "encoding/json" "fmt" "io" "io/ioutil" "net/http" - gopath "path" + "net/url" "strconv" "strings" "time" @@ -54,22 +50,34 @@ const minChunkSize = 1 << 20 // Parameters A struct that encapsulates all of the driver parameters after all values have been set type Parameters struct { - Username string - Password string - AuthURL string - Tenant string - TenantID string - Domain string - DomainID string - TrustID string - Region string - Container string - Prefix string - InsecureSkipVerify bool - ChunkSize int + Username string + Password string + AuthURL string + Tenant string + TenantID string + Domain string + DomainID string + TrustID string + Region string + Container string + Prefix string + InsecureSkipVerify bool + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string } -type swiftInfo map[string]interface{} +// swiftInfo maps the JSON structure returned by Swift /info endpoint +type swiftInfo struct { + Swift struct { + Version string `mapstructure:"version"` + } + Tempurl struct { + Methods []string `mapstructure:"methods"` + } +} func init() { factory.Register(driverName, &swiftDriverFactory{}) @@ -83,11 +91,15 @@ func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (st } type driver struct { - Conn swift.Connection - Container string - Prefix string - BulkDeleteSupport bool - ChunkSize int + Conn swift.Connection + Container string + Prefix string + BulkDeleteSupport bool + ChunkSize int + SecretKey string + AccessKey string + TempURLContainerKey bool + TempURLMethods []string } type baseEmbed struct { @@ -176,11 +188,65 @@ func New(params Parameters) (*Driver, error) { } d := &driver{ - Conn: ct, - Container: params.Container, - Prefix: params.Prefix, - BulkDeleteSupport: detectBulkDelete(params.AuthURL), - ChunkSize: params.ChunkSize, + Conn: ct, + Container: params.Container, + Prefix: params.Prefix, + ChunkSize: params.ChunkSize, + TempURLMethods: make([]string, 0), + AccessKey: params.AccessKey, + } + + info := swiftInfo{} + if config, err := d.Conn.QueryInfo(); err == nil { + _, d.BulkDeleteSupport = config["bulk_delete"] + + if err := mapstructure.Decode(config, &info); err == nil { + d.TempURLContainerKey = info.Swift.Version >= "2.3.0" + d.TempURLMethods = info.Tempurl.Methods + } + } else { + d.TempURLContainerKey = params.TempURLContainerKey + d.TempURLMethods = params.TempURLMethods + } + + if len(d.TempURLMethods) > 0 { + secretKey := params.SecretKey + if secretKey == "" { + secretKey, _ = generateSecret() + } + + // Since Swift 2.2.2, we can now set secret keys on containers + // in addition to the account secret keys. Use them in preference. 
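+		// (The container-key path below is taken only when the /info
+		// endpoint reports Swift >= 2.3.0, or, when /info is unavailable,
+		// when TempURLContainerKey is set in the driver parameters.)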
+ if d.TempURLContainerKey { + _, containerHeaders, err := d.Conn.Container(d.Container) + if err != nil { + return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) + } + + d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] + if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { + m := swift.Metadata{} + m["temp-url-key"] = secretKey + if d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil { + d.SecretKey = secretKey + } + } + } else { + // Use the account secret key + _, accountHeaders, err := d.Conn.Account() + if err != nil { + return nil, fmt.Errorf("Failed to fetch account info (%s)", err) + } + + d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] + if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { + m := swift.Metadata{} + m["temp-url-key"] = secretKey + if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil { + d.SecretKey = secretKey + } + } + } } return &Driver{ @@ -590,9 +656,58 @@ func (d *driver) Delete(ctx context.Context, path string) error { } // URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod + if d.SecretKey == "" { + return "", storagedriver.ErrUnsupportedMethod + } + + methodString := "GET" + method, ok := options["method"] + if ok { + if methodString, ok = method.(string); !ok { + return "", storagedriver.ErrUnsupportedMethod + } + } + + if methodString == "HEAD" { + // A "HEAD" request on a temporary URL is allowed if the + // signature was generated with "GET", "POST" or "PUT" + methodString = "GET" + } + + supported := false + for _, method := range d.TempURLMethods { + if method == methodString { + supported = true + break + } + } + + if !supported { + return "", storagedriver.ErrUnsupportedMethod + } + + expiresTime := time.Now().Add(20 * time.Minute) + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + + tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) + + if d.AccessKey != "" { + // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature + url, _ := url.Parse(tempURL) + query := url.Query() + query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) + url.RawQuery = query.Encode() + tempURL = url.String() + } + + return tempURL, nil } func (d *driver) swiftPath(path string) string { @@ -640,19 +755,6 @@ func (d *driver) createManifest(path string, segments string) error { return nil } -func detectBulkDelete(authURL string) (bulkDelete bool) { - resp, err := http.Get(gopath.Join(authURL, "..", "..") + "/info") - if err == nil { - defer resp.Body.Close() - decoder := json.NewDecoder(resp.Body) - var infos swiftInfo - if decoder.Decode(&infos) == nil { - _, bulkDelete = infos["bulk_delete"] - } - } - return -} - func parseManifest(manifest string) (container string, prefix string) { components := strings.SplitN(manifest, "/", 2) container = components[0] @@ -661,3 +763,11 @@ func parseManifest(manifest string) (container string, prefix string) { } return container, prefix } + +func generateSecret() (string, error) { + var secretBytes [32]byte + if _, err := 
rand.Read(secretBytes[:]); err != nil { + return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) + } + return hex.EncodeToString(secretBytes[:]), nil +} diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 705c2631..c4c3333c 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "os" "strconv" + "strings" "testing" "github.com/ncw/swift/swifttest" @@ -33,8 +34,13 @@ func init() { container string region string insecureSkipVerify bool - swiftServer *swifttest.SwiftServer - err error + secretKey string + accessKey string + containerKey bool + tempURLMethods []string + + swiftServer *swifttest.SwiftServer + err error ) username = os.Getenv("SWIFT_USERNAME") password = os.Getenv("SWIFT_PASSWORD") @@ -47,6 +53,10 @@ func init() { container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) + secretKey = os.Getenv("SWIFT_SECRET_KEY") + accessKey = os.Getenv("SWIFT_ACCESS_KEY") + containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) + tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") if username == "" || password == "" || authURL == "" || container == "" { if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { @@ -79,6 +89,10 @@ func init() { root, insecureSkipVerify, defaultChunkSize, + secretKey, + accessKey, + containerKey, + tempURLMethods, } return New(parameters) From 34c1d0ed5076994ef18338bb1ee0a8390357fd6f Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 29 Oct 2015 12:24:56 +0100 Subject: [PATCH 311/501] Ensure read after write for segments Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 64 +++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 11 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index c9d623d3..b0237281 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -20,6 +20,7 @@ package swift import ( "bytes" + "crypto/md5" "crypto/rand" "crypto/sha1" "crypto/tls" @@ -52,6 +53,12 @@ const defaultChunkSize = 20 * 1024 * 1024 // minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + // Parameters A struct that encapsulates all of the driver parameters after all values have been set type Parameters struct { Username string @@ -252,6 +259,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea partNumber := 1 chunkSize := int64(d.ChunkSize) zeroBuf := make([]byte, d.ChunkSize) + hash := md5.New() getSegment := func() string { return fmt.Sprintf("%s/%016d", segmentPath, partNumber) @@ -292,18 +300,13 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea return 0, err } - if createManifest { - if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { - return 0, err - } - } - // First, we skip the existing segments that are not modified by this call for i := range segments { if offset < cursor+segments[i].Bytes { break } cursor += segments[i].Bytes + 
hash.Write([]byte(segments[i].Hash)) partNumber++ } @@ -312,7 +315,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if offset >= currentLength { for offset-currentLength >= chunkSize { // Insert a block a zero - _, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) + headers, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) if err != nil { if err == swift.ObjectNotFound { return 0, storagedriver.PathNotFoundError{Path: getSegment()} @@ -321,6 +324,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } currentLength += chunkSize partNumber++ + hash.Write([]byte(headers["Etag"])) } cursor = currentLength @@ -355,13 +359,23 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea return false, bytesRead, err } - n, err := io.Copy(currentSegment, multi) + segmentHash := md5.New() + writer := io.MultiWriter(currentSegment, segmentHash) + + n, err := io.Copy(writer, multi) if err != nil { return false, bytesRead, err } if n > 0 { - defer currentSegment.Close() + defer func() { + closeError := currentSegment.Close() + if err != nil { + err = closeError + } + hexHash := hex.EncodeToString(segmentHash.Sum(nil)) + hash.Write([]byte(hexHash)) + }() bytesRead += n - max(0, offset-cursor) } @@ -379,7 +393,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea return false, bytesRead, err } - _, copyErr := io.Copy(currentSegment, file) + _, copyErr := io.Copy(writer, file) if err := file.Close(); err != nil { if err == swift.ObjectNotFound { @@ -414,7 +428,35 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea } } - return bytesRead, nil + for ; partNumber < len(segments); partNumber++ { + hash.Write([]byte(segments[partNumber].Hash)) + } + + if createManifest { + if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { + return 0, err + } + } + + expectedHash := hex.EncodeToString(hash.Sum(nil)) + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + for { + var infos swift.Object + if infos, _, err = d.Conn.Object(d.Container, d.swiftPath(path)); err == nil { + if strings.Trim(infos.Hash, "\"") == expectedHash { + return bytesRead, nil + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return bytesRead, err } // Stat retrieves the FileInfo for the given path, including the current size From 7840a5bc8f4991b1a9f1b7c9304b0212d21073f4 Mon Sep 17 00:00:00 2001 From: amitshukla Date: Fri, 2 Oct 2015 16:19:06 -0700 Subject: [PATCH 312/501] Fix for issue 664: https://github.com/docker/distribution/issues/664 Errors thrown by storage drivers don't have the name of the driver, causing user confusion about whether the error is coming from Docker or from a storage driver. This change adds the storage driver name to each error message. This required changing ErrUnsupportedDriver to a type, leading to code changes whenever ErrUnsupportedDriver is used. The tests check whether the driver name appears in the error message. 
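For example, a missing path surfaced through the filesystem driver now
renders as (illustrative path only):

    [filesystem] Path not found: /some/path

rather than the previous driver-agnostic "Path not found: /some/path".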
Signed-off-by: Amit Shukla --- docs/storage/blobserver.go | 7 +- docs/storage/driver/base/base.go | 71 ++++++++++++++------ docs/storage/driver/filesystem/driver.go | 2 +- docs/storage/driver/inmemory/driver.go | 2 +- docs/storage/driver/oss/oss.go | 2 +- docs/storage/driver/rados/rados.go | 2 +- docs/storage/driver/s3/s3.go | 2 +- docs/storage/driver/storagedriver.go | 26 ++++--- docs/storage/driver/swift/swift.go | 6 +- docs/storage/driver/testsuites/testsuites.go | 23 ++++++- 10 files changed, 99 insertions(+), 44 deletions(-) diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go index 24aeba69..2d89ecd8 100644 --- a/docs/storage/blobserver.go +++ b/docs/storage/blobserver.go @@ -36,16 +36,15 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - switch err { - case nil: + if err == nil { if bs.redirect { // Redirect to storage URL. http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) return err } + } - fallthrough - case driver.ErrUnsupportedMethod: + if _, ok := err.(*driver.ErrUnsupportedMethod); ok { // Fallback to serving the content directly. br, err := newFileReader(ctx, bs.driver, path, desc.Size) if err != nil { diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index 60af06b8..2333bba7 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -50,16 +50,40 @@ type Base struct { storagedriver.StorageDriver } +// Format errors received from the storage driver +func (base *Base) setDriverName(e error) error { + if e != nil { + if actualErr, ok := e.(storagedriver.ErrUnsupportedMethod); ok { + actualErr.DriverName = base.StorageDriver.Name() + return actualErr + } + if actualErr, ok := e.(storagedriver.PathNotFoundError); ok { + actualErr.DriverName = base.StorageDriver.Name() + return actualErr + } + if actualErr, ok := e.(storagedriver.InvalidPathError); ok { + actualErr.DriverName = base.StorageDriver.Name() + return actualErr + } + if actualErr, ok := e.(storagedriver.InvalidOffsetError); ok { + actualErr.DriverName = base.StorageDriver.Name() + return actualErr + } + } + return e +} + // GetContent wraps GetContent of underlying storage driver. func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { ctx, done := context.WithTrace(ctx) defer done("%s.GetContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.GetContent(ctx, path) + b, e := base.StorageDriver.GetContent(ctx, path) + return b, base.setDriverName(e) } // PutContent wraps PutContent of underlying storage driver. @@ -68,10 +92,10 @@ func (base *Base) PutContent(ctx context.Context, path string, content []byte) e defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path} + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.PutContent(ctx, path, content) + return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) } // ReadStream wraps ReadStream of underlying storage driver. 
@@ -80,14 +104,15 @@ func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) if offset < 0 { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} } if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.ReadStream(ctx, path, offset) + rc, e := base.StorageDriver.ReadStream(ctx, path, offset) + return rc, base.setDriverName(e) } // WriteStream wraps WriteStream of underlying storage driver. @@ -96,14 +121,15 @@ func (base *Base) WriteStream(ctx context.Context, path string, offset int64, re defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} } if !storagedriver.PathRegexp.MatchString(path) { - return 0, storagedriver.InvalidPathError{Path: path} + return 0, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.WriteStream(ctx, path, offset, reader) + i64, e := base.StorageDriver.WriteStream(ctx, path, offset, reader) + return i64, base.setDriverName(e) } // Stat wraps Stat of underlying storage driver. @@ -112,10 +138,11 @@ func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo defer done("%s.Stat(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { - return nil, storagedriver.InvalidPathError{Path: path} + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.Stat(ctx, path) + fi, e := base.StorageDriver.Stat(ctx, path) + return fi, base.setDriverName(e) } // List wraps List of underlying storage driver. @@ -124,10 +151,11 @@ func (base *Base) List(ctx context.Context, path string) ([]string, error) { defer done("%s.List(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { - return nil, storagedriver.InvalidPathError{Path: path} + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.List(ctx, path) + str, e := base.StorageDriver.List(ctx, path) + return str, base.setDriverName(e) } // Move wraps Move of underlying storage driver. @@ -136,12 +164,12 @@ func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { - return storagedriver.InvalidPathError{Path: sourcePath} + return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} } else if !storagedriver.PathRegexp.MatchString(destPath) { - return storagedriver.InvalidPathError{Path: destPath} + return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.Move(ctx, sourcePath, destPath) + return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) } // Delete wraps Delete of underlying storage driver. 
@@ -150,10 +178,10 @@ func (base *Base) Delete(ctx context.Context, path string) error { defer done("%s.Delete(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { - return storagedriver.InvalidPathError{Path: path} + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.Delete(ctx, path) + return base.setDriverName(base.StorageDriver.Delete(ctx, path)) } // URLFor wraps URLFor of underlying storage driver. @@ -162,8 +190,9 @@ func (base *Base) URLFor(ctx context.Context, path string, options map[string]in defer done("%s.URLFor(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { - return "", storagedriver.InvalidPathError{Path: path} + return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.StorageDriver.URLFor(ctx, path, options) + str, e := base.StorageDriver.URLFor(ctx, path, options) + return str, base.setDriverName(e) } diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index d5d8708c..20ccfce7 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -248,7 +248,7 @@ func (d *driver) Delete(ctx context.Context, subPath string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod + return "", new(storagedriver.ErrUnsupportedMethod) } // fullPath returns the absolute path of a key within the Driver's storage. diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go index 2d121e1c..2dad0ec8 100644 --- a/docs/storage/driver/inmemory/driver.go +++ b/docs/storage/driver/inmemory/driver.go @@ -258,5 +258,5 @@ func (d *driver) Delete(ctx context.Context, path string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod + return "", new(storagedriver.ErrUnsupportedMethod) } diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index cec32026..99bca366 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -748,7 +748,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int if ok { methodString, ok = method.(string) if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod + return "", new(storagedriver.ErrUnsupportedMethod) } } diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go index b2e6590d..fa73c8d2 100644 --- a/docs/storage/driver/rados/rados.go +++ b/docs/storage/driver/rados/rados.go @@ -496,7 +496,7 @@ func (d *driver) Delete(ctx context.Context, objectPath string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. 
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod + return "", new(storagedriver.ErrUnsupportedMethod) } // Generate a blob identifier diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 46dbcd7f..0a9d80c0 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -759,7 +759,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int if ok { methodString, ok = method.(string) if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod + return "", new(storagedriver.ErrUnsupportedMethod) } } diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index bade099f..996381c6 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -1,7 +1,6 @@ package driver import ( - "errors" "fmt" "io" "regexp" @@ -93,33 +92,42 @@ type StorageDriver interface { var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) // ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. -var ErrUnsupportedMethod = errors.New("unsupported method") +type ErrUnsupportedMethod struct { + DriverName string +} + +func (err ErrUnsupportedMethod) Error() string { + return fmt.Sprintf("[%s] unsupported method", err.DriverName) +} // PathNotFoundError is returned when operating on a nonexistent path. type PathNotFoundError struct { - Path string + Path string + DriverName string } func (err PathNotFoundError) Error() string { - return fmt.Sprintf("Path not found: %s", err.Path) + return fmt.Sprintf("[%s] Path not found: %s", err.DriverName, err.Path) } // InvalidPathError is returned when the provided path is malformed. type InvalidPathError struct { - Path string + Path string + DriverName string } func (err InvalidPathError) Error() string { - return fmt.Sprintf("Invalid path: %s", err.Path) + return fmt.Sprintf("[%s] Invalid path: %s", err.DriverName, err.Path) } // InvalidOffsetError is returned when attempting to read or write from an // invalid offset. type InvalidOffsetError struct { - Path string - Offset int64 + Path string + Offset int64 + DriverName string } func (err InvalidOffsetError) Error() string { - return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path) + return fmt.Sprintf("[%s] Invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) } diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 3b2cdc53..bd330925 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -658,14 +658,14 @@ func (d *driver) Delete(ctx context.Context, path string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. 
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { if d.SecretKey == "" { - return "", storagedriver.ErrUnsupportedMethod + return "", storagedriver.ErrUnsupportedMethod{} } methodString := "GET" method, ok := options["method"] if ok { if methodString, ok = method.(string); !ok { - return "", storagedriver.ErrUnsupportedMethod + return "", storagedriver.ErrUnsupportedMethod{} } } @@ -684,7 +684,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int } if !supported { - return "", storagedriver.ErrUnsupportedMethod + return "", storagedriver.ErrUnsupportedMethod{} } expiresTime := time.Now().Add(20 * time.Minute) diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 1772560b..f8117285 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -10,6 +10,7 @@ import ( "os" "path" "sort" + "strings" "sync" "testing" "time" @@ -145,10 +146,12 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) { defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } } @@ -205,6 +208,7 @@ func (suite *DriverSuite) TestReadNonexistent(c *check.C) { _, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestWriteReadStreams1 tests a simple write-read streaming workflow. @@ -321,6 +325,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) c.Assert(reader, check.IsNil) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) // Read past the end of the content and make sure we get a reader that // returns 0 bytes and io.EOF @@ -443,6 +448,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestReadNonexistentStream tests that reading a stream for a nonexistent path @@ -453,10 +459,12 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestList checks the returned list of keys after populating a directory tree. 
@@ -517,6 +525,7 @@ func (suite *DriverSuite) TestMove(c *check.C) { _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestMoveOverwrite checks that a moved object no longer exists at the source @@ -546,6 +555,7 @@ func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestMoveNonexistent checks that moving a nonexistent key fails and does not @@ -563,6 +573,7 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) @@ -600,6 +611,7 @@ func (suite *DriverSuite) TestDelete(c *check.C) { _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestURLFor checks that the URLFor method functions properly, but only if it @@ -614,7 +626,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { c.Assert(err, check.IsNil) url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) - if err == storagedriver.ErrUnsupportedMethod { + if _, ok := err.(*storagedriver.ErrUnsupportedMethod); ok { return } c.Assert(err, check.IsNil) @@ -628,7 +640,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { c.Assert(read, check.DeepEquals, contents) url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) - if err == storagedriver.ErrUnsupportedMethod { + if _, ok := err.(*storagedriver.ErrUnsupportedMethod); ok { return } c.Assert(err, check.IsNil) @@ -644,6 +656,7 @@ func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { err := suite.StorageDriver.Delete(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestDeleteFolder checks that deleting a folder removes all child elements. 
@@ -671,6 +684,7 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
 	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
 
 	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
 	c.Assert(err, check.IsNil)
@@ -684,14 +698,17 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
 	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
 
 	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
 
 	_, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3))
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
 }
 
 // TestStatCall verifies the implementation of the storagedriver's Stat call.
@@ -707,11 +724,13 @@ func (suite *DriverSuite) TestStatCall(c *check.C) {
 	fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
 	c.Assert(fi, check.IsNil)
 
 	fi, err = suite.StorageDriver.Stat(suite.ctx, filePath)
 	c.Assert(err, check.NotNil)
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true)
 	c.Assert(fi, check.IsNil)
 
 	err = suite.StorageDriver.PutContent(suite.ctx, filePath, content)

From e79324edd8794711003e4602917b36adc175a4a0 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Mon, 2 Nov 2015 13:23:53 -0800
Subject: [PATCH 313/501] Add a generic error type to capture non-typed errors

Signed-off-by: Richard Scothern
---
 docs/storage/blobserver.go                   |  6 ++--
 docs/storage/driver/base/base.go             | 38 +++++++++++---------
 docs/storage/driver/filesystem/driver.go     |  2 +-
 docs/storage/driver/gcs/gcs.go               |  4 +--
 docs/storage/driver/inmemory/driver.go       |  2 +-
 docs/storage/driver/oss/oss.go               |  2 +-
 docs/storage/driver/rados/rados.go           |  2 +-
 docs/storage/driver/s3/s3.go                 |  2 +-
 docs/storage/driver/storagedriver.go         | 11 ++++++
 docs/storage/driver/testsuites/testsuites.go |  4 +--
 10 files changed, 44 insertions(+), 29 deletions(-)

diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go
index 2d89ecd8..45f81f53 100644
--- a/docs/storage/blobserver.go
+++ b/docs/storage/blobserver.go
@@ -36,15 +36,15 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h
 	redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
 
-	if err == nil {
+	switch err.(type) {
+	case nil:
 		if bs.redirect {
 			// Redirect to storage URL.
 			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
 			return err
 		}
-	}
 
-	if _, ok := err.(*driver.ErrUnsupportedMethod); ok {
+	case driver.ErrUnsupportedMethod:
 		// Fallback to serving the content directly.
br, err := newFileReader(ctx, bs.driver, path, desc.Size) if err != nil { diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index 2333bba7..c816d2d6 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -52,25 +52,29 @@ type Base struct { // Format errors received from the storage driver func (base *Base) setDriverName(e error) error { - if e != nil { - if actualErr, ok := e.(storagedriver.ErrUnsupportedMethod); ok { - actualErr.DriverName = base.StorageDriver.Name() - return actualErr - } - if actualErr, ok := e.(storagedriver.PathNotFoundError); ok { - actualErr.DriverName = base.StorageDriver.Name() - return actualErr - } - if actualErr, ok := e.(storagedriver.InvalidPathError); ok { - actualErr.DriverName = base.StorageDriver.Name() - return actualErr - } - if actualErr, ok := e.(storagedriver.InvalidOffsetError); ok { - actualErr.DriverName = base.StorageDriver.Name() - return actualErr + switch actual := e.(type) { + case nil: + return nil + case storagedriver.ErrUnsupportedMethod: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.PathNotFoundError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidPathError: + actual.DriverName = base.StorageDriver.Name() + return actual + case storagedriver.InvalidOffsetError: + actual.DriverName = base.StorageDriver.Name() + return actual + default: + storageError := storagedriver.Error{ + DriverName: base.StorageDriver.Name(), + Enclosed: e, } + + return storageError } - return e } // GetContent wraps GetContent of underlying storage driver. diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index 20ccfce7..7dece0b3 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -248,7 +248,7 @@ func (d *driver) Delete(ctx context.Context, subPath string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", new(storagedriver.ErrUnsupportedMethod) + return "", storagedriver.ErrUnsupportedMethod{} } // fullPath returns the absolute path of a key within the Driver's storage. 
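Editorial note: the default branch in the switch above is the substance of this patch; anything a driver returns that is not one of the known typed errors gets enclosed so the driver name is never lost. A toy sketch of that shape, using stand-in types rather than the real package:

    package main

    import (
        "errors"
        "fmt"
    )

    // Error is a stand-in for the catch-all wrapper introduced above:
    // it carries the driver name alongside the original enclosed error.
    type Error struct {
        DriverName string
        Enclosed   error
    }

    func (err Error) Error() string {
        return fmt.Sprintf("[%s] %s", err.DriverName, err.Enclosed)
    }

    // wrap mimics the switch above: nil passes through untouched, and
    // any unrecognized error is enclosed with the driver name.
    func wrap(e error, driver string) error {
        switch e.(type) {
        case nil:
            return nil
        default:
            return Error{DriverName: driver, Enclosed: e}
        }
    }

    func main() {
        fmt.Println(wrap(errors.New("connection reset"), "s3"))
        // [s3] connection reset
    }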
diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 8dc96675..4cef972c 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -575,7 +575,7 @@ func (d *driver) Delete(context ctx.Context, path string) error { // Returns ErrUnsupportedMethod if this driver has no privateKey func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { if d.privateKey == nil { - return "", storagedriver.ErrUnsupportedMethod + return "", storagedriver.ErrUnsupportedMethod{} } name := d.pathToKey(path) @@ -584,7 +584,7 @@ func (d *driver) URLFor(context ctx.Context, path string, options map[string]int if ok { methodString, ok = method.(string) if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod + return "", storagedriver.ErrUnsupportedMethod{} } } diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go index 2dad0ec8..b5735c0a 100644 --- a/docs/storage/driver/inmemory/driver.go +++ b/docs/storage/driver/inmemory/driver.go @@ -258,5 +258,5 @@ func (d *driver) Delete(ctx context.Context, path string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", new(storagedriver.ErrUnsupportedMethod) + return "", storagedriver.ErrUnsupportedMethod{} } diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 99bca366..c16b9949 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -748,7 +748,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int if ok { methodString, ok = method.(string) if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", new(storagedriver.ErrUnsupportedMethod) + return "", storagedriver.ErrUnsupportedMethod{} } } diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go index fa73c8d2..29bc3247 100644 --- a/docs/storage/driver/rados/rados.go +++ b/docs/storage/driver/rados/rados.go @@ -496,7 +496,7 @@ func (d *driver) Delete(ctx context.Context, objectPath string) error { // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. 
 func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
-	return "", new(storagedriver.ErrUnsupportedMethod)
+	return "", storagedriver.ErrUnsupportedMethod{}
 }
 
 // Generate a blob identifier
diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go
index 0a9d80c0..7672fbdb 100644
--- a/docs/storage/driver/s3/s3.go
+++ b/docs/storage/driver/s3/s3.go
@@ -759,7 +759,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
 	if ok {
 		methodString, ok = method.(string)
 		if !ok || (methodString != "GET" && methodString != "HEAD") {
-			return "", new(storagedriver.ErrUnsupportedMethod)
+			return "", storagedriver.ErrUnsupportedMethod{}
 		}
 	}
 
diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go
index 996381c6..cd1c883b 100644
--- a/docs/storage/driver/storagedriver.go
+++ b/docs/storage/driver/storagedriver.go
@@ -131,3 +131,14 @@ type InvalidOffsetError struct {
 func (err InvalidOffsetError) Error() string {
 	return fmt.Sprintf("[%s] Invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path)
 }
+
+// Error is a catch-all error type which captures an error string and
+// the driver type on which it occurred.
+type Error struct {
+	DriverName string
+	Enclosed   error
+}
+
+func (err Error) Error() string {
+	return fmt.Sprintf("[%s] %s", err.DriverName, err.Enclosed)
+}
diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
index f8117285..f99df8d9 100644
--- a/docs/storage/driver/testsuites/testsuites.go
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -626,7 +626,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) {
 	c.Assert(err, check.IsNil)
 
 	url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil)
-	if _, ok := err.(*storagedriver.ErrUnsupportedMethod); ok {
+	if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
 		return
 	}
 	c.Assert(err, check.IsNil)
@@ -640,7 +640,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) {
 	c.Assert(read, check.DeepEquals, contents)
 
 	url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"})
-	if _, ok := err.(*storagedriver.ErrUnsupportedMethod); ok {
+	if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok {
 		return
 	}
 	c.Assert(err, check.IsNil)

From 78b6d648fa685930b366f1775a8092cd80a640c8 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Tue, 3 Nov 2015 11:03:17 -0800
Subject: [PATCH 314/501] Before allowing a schema1 manifest to be stored in
 the registry, ensure that it contains equal length History and FSLayer
 arrays.

This is required to prevent malformed manifests from being put to the
registry and failing external verification checks.
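Editorial note: the verification rule this patch adds is a plain length comparison. A minimal sketch of the rule under stand-in types (the real check lives in verifyManifest and uses the schema1 package):

    package main

    import "fmt"

    // FSLayer and History are stand-ins for the schema1 types.
    type FSLayer struct{ BlobSum string }
    type History struct{ V1Compatibility string }

    // checkCardinality mirrors the rule: every FSLayer entry must be
    // paired with a History entry, otherwise the manifest is rejected.
    func checkCardinality(history []History, fsLayers []FSLayer) error {
        if len(history) != len(fsLayers) {
            return fmt.Errorf("mismatched history and fslayer cardinality %d != %d",
                len(history), len(fsLayers))
        }
        return nil
    }

    func main() {
        err := checkCardinality(
            []History{{V1Compatibility: ""}},
            []FSLayer{{BlobSum: "asdf"}, {BlobSum: "qwer"}},
        )
        fmt.Println(err) // mismatched history and fslayer cardinality 1 != 2
    }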
Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 26 +++++++++++++++++++++++++- docs/storage/manifeststore.go | 5 +++++ docs/storage/manifeststore_test.go | 4 ++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index c5683dfa..7f52d13d 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -804,6 +804,14 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m BlobSum: "qwer", }, }, + History: []schema1.History{ + { + V1Compatibility: "", + }, + { + V1Compatibility: "", + }, + }, } resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) @@ -999,6 +1007,19 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) } + // Attempt to put a manifest with mismatching FSLayer and History array cardinalities + + unsignedManifest.History = append(unsignedManifest.History, schema1.History{ + V1Compatibility: "", + }) + invalidSigned, err := schema1.Sign(unsignedManifest, env.pk) + if err != nil { + t.Fatalf("error signing manifest") + } + + resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, invalidSigned) + checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) + return env, args } @@ -1432,8 +1453,10 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { BlobSum: "asdf", }, + }, + History: []schema1.History{ { - BlobSum: "qwer", + V1Compatibility: "", }, }, } @@ -1499,6 +1522,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { Name: imageName, Tag: tag, FSLayers: []schema1.FSLayer{}, + History: []schema1.History{}, } sm, err := schema1.Sign(m, env.pk) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index db49aaa4..d161fb5a 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -110,6 +110,11 @@ func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.Sign errs = append(errs, fmt.Errorf("repository name does not match manifest name")) } + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + if _, err := schema1.Verify(mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 30126e4b..51370e17 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -98,6 +98,10 @@ func TestManifestStorage(t *testing.T) { m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + } pk, err := libtrust.GenerateECP256PrivateKey() From f01a70c8a63731a3d35473ab3a4367b8465efb80 Mon Sep 17 00:00:00 2001 From: Ted Reed Date: Fri, 6 Nov 2015 17:10:28 -0800 Subject: [PATCH 315/501] De-obfuscate error message Previously, this error message would stringify as a pointer address, which isn't particularly helpful. This change breaks out the elements of the challenge object such that the error is appropriately represented. 
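Editorial note: the root cause here is Go's %#v verb, which renders any pointer-valued field (such as a wrapped error) as a raw address. A small before-and-after sketch with a stand-in challenge type; the exact struct layout is illustrative, not the actual htpasswd internals:

    package main

    import (
        "errors"
        "fmt"
    )

    // challenge stands in for the auth challenge type: a realm plus the
    // underlying authentication error.
    type challenge struct {
        realm string
        err   error
    }

    func main() {
        ch := challenge{realm: "registry", err: errors.New("invalid credentials")}

        // Before: %#v dumps the struct representation, printing the
        // enclosed error as a pointer address rather than its message.
        fmt.Printf("basic authentication challenge: %#v\n", ch)

        // After: name the fields that matter to a human reader.
        fmt.Printf("basic authentication challenge for realm %q: %s\n", ch.realm, ch.err)
    }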
Signed-off-by: Ted Reed --- docs/auth/htpasswd/access.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go index 5ac3d84a..82d3556d 100644 --- a/docs/auth/htpasswd/access.go +++ b/docs/auth/htpasswd/access.go @@ -94,7 +94,7 @@ func (ch challenge) SetHeaders(w http.ResponseWriter) { } func (ch challenge) Error() string { - return fmt.Sprintf("basic authentication challenge: %#v", ch) + return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) } func init() { From accfa46f9ba4966af57ac2bed36ac84de71d6396 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 9 Nov 2015 15:33:05 -0800 Subject: [PATCH 316/501] Fix empty delete requests with Swift fs driver The Delete method lists objects under the given path and tries to delete all of them with a bulk delete request. If the path has no objects underneath it, the body of this request will be empty, which causes HTTP-level issues. Specifically, Go's HTTP client senses the empty request buffer and doesn't include a Content-Length, which causes the Swift server to fail the request. This commit fixes the problem by avoiding sending empty bulk delete requests. This is the correct thing to do anyway, since there's no reason to request deletion of zero objects. Signed-off-by: Aaron Lehmann --- docs/storage/driver/swift/swift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index bd330925..358ca69f 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -587,7 +587,7 @@ func (d *driver) Delete(ctx context.Context, path string) error { return err } - if d.BulkDeleteSupport { + if len(objects) > 0 && d.BulkDeleteSupport { filenames := make([]string, len(objects)) for i, obj := range objects { filenames[i] = obj.Name From 8257e8c42a8157c671b390e4cbb158c5481a6c8e Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Thu, 22 Oct 2015 20:55:51 +0200 Subject: [PATCH 317/501] Use case of type name Signed-off-by: Troels Thomsen --- docs/storage/manifeststore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index d161fb5a..9af22541 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -47,7 +47,7 @@ func SkipLayerVerification(ms distribution.ManifestService) error { ms.skipDependencyVerification = true return nil } - return fmt.Errorf("skip layer verification only valid for manifeststore") + return fmt.Errorf("skip layer verification only valid for manifestStore") } func (ms *manifestStore) Put(manifest *schema1.SignedManifest) error { From a9a1b57900460b10bdf27eb642c5e5f02f844b22 Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Wed, 25 Nov 2015 21:16:28 +0100 Subject: [PATCH 318/501] Remove name verification Signed-off-by: Troels Thomsen --- docs/storage/manifeststore.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 9af22541..9c04b003 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -106,9 +106,6 @@ func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestSe // content, leaving trust policies of that content up to consumers. 
func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { var errs distribution.ErrManifestVerification - if mnfst.Name != ms.repository.Name() { - errs = append(errs, fmt.Errorf("repository name does not match manifest name")) - } if len(mnfst.History) != len(mnfst.FSLayers) { errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", From 6fb6183083a626be81d595800fcfea56ea7e0624 Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Thu, 26 Nov 2015 10:28:28 +0100 Subject: [PATCH 319/501] Verify manifest name length Signed-off-by: Troels Thomsen --- docs/storage/manifeststore.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 9c04b003..4cbfbda2 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/libtrust" ) @@ -107,6 +108,10 @@ func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestSe func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { var errs distribution.ErrManifestVerification + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax)) + } + if len(mnfst.History) != len(mnfst.FSLayers) { errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", len(mnfst.History), len(mnfst.FSLayers))) From 8299937613b6a5ae6dd1767e7ff633be5fbf893a Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Thu, 26 Nov 2015 10:28:35 +0100 Subject: [PATCH 320/501] Verify manifest name format Signed-off-by: Troels Thomsen --- docs/storage/manifeststore.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 4cbfbda2..2505b57c 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -112,6 +112,10 @@ func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.Sign errs = append(errs, fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax)) } + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, fmt.Errorf("invalid manifest name format")) + } + if len(mnfst.History) != len(mnfst.FSLayers) { errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", len(mnfst.History), len(mnfst.FSLayers))) From beeff299f86c658756e6f755ade148bb9a51069b Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Tue, 1 Dec 2015 22:22:27 +0100 Subject: [PATCH 321/501] Use well-known error type Signed-off-by: Troels Thomsen --- docs/storage/manifeststore.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 2505b57c..024c8e4b 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -109,11 +109,19 @@ func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.Sign var errs distribution.ErrManifestVerification if len(mnfst.Name) > reference.NameTotalLengthMax { - errs = append(errs, fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax)) + errs = append(errs, + 
distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) } if !reference.NameRegexp.MatchString(mnfst.Name) { - errs = append(errs, fmt.Errorf("invalid manifest name format")) + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) } if len(mnfst.History) != len(mnfst.FSLayers) { From e1cf7c418b81b5bc6700f9efcd6ddf7f4ec06816 Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Tue, 1 Dec 2015 22:26:37 +0100 Subject: [PATCH 322/501] Map error type to error code Signed-off-by: Troels Thomsen --- docs/handlers/images.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/handlers/images.go b/docs/handlers/images.go index f753f099..d30fce26 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -169,6 +169,8 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http switch verificationError := verificationError.(type) { case distribution.ErrManifestBlobUnknown: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) + case distribution.ErrManifestNameInvalid: + imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) case distribution.ErrManifestUnverified: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) default: From 7bf8f846c277b67a55e377d7bafbabdf892b0514 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 1 Dec 2015 16:24:07 -0800 Subject: [PATCH 323/501] storage: correctly handle error during Walk Signed-off-by: Stephen J Day --- docs/storage/walk.go | 4 +++- docs/storage/walk_test.go | 12 +++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/storage/walk.go b/docs/storage/walk.go index 3d891276..a27c2b03 100644 --- a/docs/storage/walk.go +++ b/docs/storage/walk.go @@ -38,7 +38,9 @@ func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, } if fileInfo.IsDir() && !skipDir { - Walk(ctx, driver, child, f) + if err := Walk(ctx, driver, child, f); err != nil { + return err + } } } return nil diff --git a/docs/storage/walk_test.go b/docs/storage/walk_test.go index 40b8547c..684155b2 100644 --- a/docs/storage/walk_test.go +++ b/docs/storage/walk_test.go @@ -41,10 +41,12 @@ func TestWalkErrors(t *testing.T) { t.Error("Expected invalid root err") } + errEarlyExpected := fmt.Errorf("Early termination") + err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { // error on the 2nd file if fileInfo.Path() == "/a/b" { - return fmt.Errorf("Early termination") + return errEarlyExpected } delete(expected, fileInfo.Path()) return nil @@ -52,8 +54,12 @@ func TestWalkErrors(t *testing.T) { if len(expected) != fileCount-1 { t.Error("Walk failed to terminate with error") } - if err != nil { - t.Error(err.Error()) + if err != errEarlyExpected { + if err == nil { + t.Fatalf("expected an error due to early termination") + } else { + t.Error(err.Error()) + } } err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { From 93f92498ce26873fe40ddc71381e1f615cc58ceb Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 1 Dec 2015 16:24:31 -0800 Subject: [PATCH 324/501] storage: enforce sorted traversal during Walk Signed-off-by: Stephen J Day --- docs/storage/walk.go | 6 ++++++ docs/storage/walk_test.go | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/docs/storage/walk.go b/docs/storage/walk.go index a27c2b03..d979796e 
100644 --- a/docs/storage/walk.go +++ b/docs/storage/walk.go @@ -3,6 +3,7 @@ package storage import ( "errors" "fmt" + "sort" "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" @@ -26,7 +27,12 @@ func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, if err != nil { return err } + sort.Stable(sort.StringSlice(children)) for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. fileInfo, err := driver.Stat(ctx, child) if err != nil { return err diff --git a/docs/storage/walk_test.go b/docs/storage/walk_test.go index 684155b2..5b922e0f 100644 --- a/docs/storage/walk_test.go +++ b/docs/storage/walk_test.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "sort" "testing" "github.com/docker/distribution/context" @@ -73,6 +74,7 @@ func TestWalkErrors(t *testing.T) { func TestWalk(t *testing.T) { d, expected, ctx := testFS(t) + var traversed []string err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() filetype, ok := expected[filePath] @@ -90,11 +92,17 @@ func TestWalk(t *testing.T) { } } delete(expected, filePath) + traversed = append(traversed, filePath) return nil }) if len(expected) > 0 { t.Errorf("Missed files in walk: %q", expected) } + + if !sort.StringsAreSorted(traversed) { + t.Errorf("result should be sorted: %v", traversed) + } + if err != nil { t.Fatalf(err.Error()) } From 6693e9667cd9e06319a5945c64d0fbc48859d49d Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 1 Dec 2015 16:55:10 -0800 Subject: [PATCH 325/501] storage: add further tests for Walk implementation Signed-off-by: Stephen J Day --- docs/storage/walk_test.go | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/docs/storage/walk_test.go b/docs/storage/walk_test.go index 5b922e0f..42f67dba 100644 --- a/docs/storage/walk_test.go +++ b/docs/storage/walk_test.go @@ -12,14 +12,7 @@ import ( func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { d := inmemory.New() - c := []byte("") ctx := context.Background() - if err := d.PutContent(ctx, "/a/b/c/d", c); err != nil { - t.Fatalf("Unable to put to inmemory fs") - } - if err := d.PutContent(ctx, "/a/b/c/e", c); err != nil { - t.Fatalf("Unable to put to inmemory fs") - } expected := map[string]string{ "/a": "dir", @@ -27,6 +20,22 @@ func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Cont "/a/b/c": "dir", "/a/b/c/d": "file", "/a/b/c/e": "file", + "/a/b/f": "dir", + "/a/b/f/g": "file", + "/a/b/f/h": "file", + "/a/b/f/i": "file", + "/z": "dir", + "/z/y": "file", + } + + for p, typ := range expected { + if typ != "file" { + continue + } + + if err := d.PutContent(ctx, p, []byte(p)); err != nil { + t.Fatalf("unable to put content into fixture: %v", err) + } } return d, expected, ctx @@ -49,6 +58,7 @@ func TestWalkErrors(t *testing.T) { if fileInfo.Path() == "/a/b" { return errEarlyExpected } + delete(expected, fileInfo.Path()) return nil }) @@ -90,6 +100,13 @@ func TestWalk(t *testing.T) { if filetype != "file" { t.Errorf("Unexpected file type: %q", filePath) } + + // each file has its own path as the contents. If the length + // doesn't match the path length, fail. 
+		if fileInfo.Size() != int64(len(fileInfo.Path())) {
+			t.Fatalf("unexpected size for %q: %v != %v",
+				fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path()))
+		}
 		}
 		delete(expected, filePath)
 		traversed = append(traversed, filePath)
 		return nil
 	})

From bf2cc0a9d65b6a6f1f17d28c7e2ba1c0a01086fd Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Mon, 30 Nov 2015 18:35:19 -0800
Subject: [PATCH 326/501] Avoid stat round-trips when fetching a blob

Without this commit, three round-trips are required to fetch a blob
with a progress bar. The first is a call to Stat (HEAD request), to
determine the size. Then Open is called, which also calls Stat, and
finally performs a GET request. Only the GET request is actually needed.

The size of the blob can be sniffed from Content-Length in the GET
response. This commit changes HTTPReadSeeker to automatically detect
the size from Content-Length instead of requiring it to be passed in.
The Stat call is removed from Open because it is no longer necessary.

HTTPReadSeeker now takes an additional errorHandler callback argument
which translates an unsuccessful HTTP response into an appropriate
API-level error. Using a callback for this makes it possible to avoid
leaking the response body to Read's caller, which would make lifecycle
management problematic.

Fixes #1223

Signed-off-by: Aaron Lehmann
---
 docs/client/repository.go            | 15 ++---
 docs/client/transport/http_reader.go | 84 +++++++++++++++++++---------
 2 files changed, 65 insertions(+), 34 deletions(-)

diff --git a/docs/client/repository.go b/docs/client/repository.go
index fc709ded..6fc2bf72 100644
--- a/docs/client/repository.go
+++ b/docs/client/repository.go
@@ -391,17 +391,18 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
 }
 
 func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
-	stat, err := bs.statter.Stat(ctx, dgst)
+	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
 	if err != nil {
 		return nil, err
 	}
 
-	blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest)
-	if err != nil {
-		return nil, err
-	}
-
-	return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil
+	return transport.NewHTTPReadSeeker(bs.client, blobURL,
+		func(resp *http.Response) error {
+			if resp.StatusCode == http.StatusNotFound {
+				return distribution.ErrBlobUnknown
+			}
+			return handleErrorResponse(resp)
+		}), nil
 }
 
 func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go
index b2e74ddb..b27b6c23 100644
--- a/docs/client/transport/http_reader.go
+++ b/docs/client/transport/http_reader.go
@@ -2,11 +2,9 @@ package transport
 
 import (
 	"bufio"
-	"bytes"
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 )
@@ -21,11 +19,11 @@ type ReadSeekCloser interface {
 // request. When seeking and starting a read from a non-zero offset
 // a "Range" header will be added which sets the offset.
 // TODO(dmcgowan): Move this into a separate utility package
-func NewHTTPReadSeeker(client *http.Client, url string, size int64) ReadSeekCloser {
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
 	return &httpReadSeeker{
-		client: client,
-		url:    url,
-		size:   size,
+		client:       client,
+		url:          url,
+		errorHandler: errorHandler,
 	}
 }
 
@@ -33,12 +31,26 @@ type httpReadSeeker struct {
 	client *http.Client
 	url    string
 
+	// errorHandler creates an error from an unsuccessful HTTP response.
+	// This allows the error to be created with the HTTP response body
+	// without leaking the body through a returned error.
+	errorHandler func(*http.Response) error
+
 	size int64
 
-	rc     io.ReadCloser // remote read closer
-	brd    *bufio.Reader // internal buffered io
-	offset int64
-	err    error
+	// rc is the remote read closer.
+	rc io.ReadCloser
+	// brd is a buffer for internal buffered io.
+	brd *bufio.Reader
+	// readerOffset tracks the offset as of the last read.
+	readerOffset int64
+	// seekOffset allows Seek to override the offset. Seek changes
+	// seekOffset instead of changing readerOffset directly so that
+	// connection resets can be delayed and possibly avoided if the
+	// seek is undone (i.e. seeking to the end and then back to the
+	// beginning).
+	seekOffset int64
+	err        error
 }
 
 func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
@@ -46,16 +58,29 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
 		return 0, hrs.err
 	}
 
+	// If we seeked to a different position, we need to reset the
+	// connection. This logic is here instead of Seek so that if
+	// a seek is undone before the next read, the connection doesn't
+	// need to be closed and reopened. A common example of this is
+	// seeking to the end to determine the length, and then seeking
+	// back to the original position.
+	if hrs.readerOffset != hrs.seekOffset {
+		hrs.reset()
+	}
+
+	hrs.readerOffset = hrs.seekOffset
+
 	rd, err := hrs.reader()
 	if err != nil {
 		return 0, err
 	}
 
 	n, err = rd.Read(p)
-	hrs.offset += int64(n)
+	hrs.seekOffset += int64(n)
+	hrs.readerOffset += int64(n)
 
 	// Simulate io.EOF error if we reach filesize.
-	if err == nil && hrs.offset >= hrs.size {
+	if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size {
 		err = io.EOF
 	}
 
@@ -67,13 +92,20 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 		return 0, hrs.err
 	}
 
-	var err error
-	newOffset := hrs.offset
+	_, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	newOffset := hrs.seekOffset
 
 	switch whence {
 	case os.SEEK_CUR:
 		newOffset += int64(offset)
 	case os.SEEK_END:
+		if hrs.size < 0 {
+			return 0, errors.New("content length not known")
+		}
 		newOffset = hrs.size + int64(offset)
 	case os.SEEK_SET:
 		newOffset = int64(offset)
 	}
 
@@ -82,15 +114,10 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 	if newOffset < 0 {
 		err = errors.New("cannot seek to negative position")
 	} else {
-		if hrs.offset != newOffset {
-			hrs.reset()
-		}
-
-		// No problems, set the offset.
-		hrs.offset = newOffset
+		hrs.seekOffset = newOffset
 	}
 
-	return hrs.offset, err
+	return hrs.seekOffset, err
 }
 
 func (hrs *httpReadSeeker) Close() error {
@@ -130,17 +157,12 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 		return hrs.brd, nil
 	}
 
-	// If the offset is great than or equal to size, return a empty, noop reader.
-	if hrs.offset >= hrs.size {
-		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
-	}
-
 	req, err := http.NewRequest("GET", hrs.url, nil)
 	if err != nil {
 		return nil, err
 	}
 
-	if hrs.offset > 0 {
+	if hrs.readerOffset > 0 {
 		// TODO(stevvooe): Get this working correctly.
 
 		// If we are at different offset, issue a range request from there.
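Editorial note: the interplay of seekOffset and readerOffset above is what makes the common size probe cheap; Seek only records the requested position, and Read compares it with the reader's actual position to decide whether the connection must be reopened. A toy sketch of that deferral, independent of HTTP and of the real transport package:

    package main

    import "fmt"

    // lazySeeker defers repositioning until the next read, so a seek
    // that is undone (for example, seeking to the end to learn the size
    // and then seeking back) never forces the stream to be reopened.
    type lazySeeker struct {
        readerOffset int64 // position of the underlying stream
        seekOffset   int64 // position most recently requested
        resets       int   // how many times the stream was reopened
    }

    func (s *lazySeeker) Seek(offset int64) {
        s.seekOffset = offset // record only; no reset yet
    }

    func (s *lazySeeker) Read() {
        if s.readerOffset != s.seekOffset {
            s.resets++ // pay for the reposition only when reading
            s.readerOffset = s.seekOffset
        }
        // ... read from the stream and advance both offsets ...
    }

    func main() {
        var s lazySeeker
        s.Seek(1024) // probe the end
        s.Seek(0)    // and come right back
        s.Read()
        fmt.Println("resets:", s.resets) // resets: 0, the probe cost nothing
    }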
@@ -158,8 +180,16 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { hrs.rc = resp.Body + if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } } else { defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } From 1f5f9bad398e374eaf4fffffa5da2c96d7d4e06a Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 2 Dec 2015 15:57:47 -0800 Subject: [PATCH 327/501] Validate digest length on parsing Signed-off-by: Tonis Tiigi --- docs/storage/cache/cachecheck/suite.go | 10 ++++---- docs/storage/manifeststore_test.go | 8 +++--- docs/storage/paths_test.go | 34 +++++++++++++------------- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/storage/cache/cachecheck/suite.go b/docs/storage/cache/cachecheck/suite.go index ed0f95fd..42390953 100644 --- a/docs/storage/cache/cachecheck/suite.go +++ b/docs/storage/cache/cachecheck/suite.go @@ -20,7 +20,7 @@ func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCachePr } func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown { + if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty store: %v", err) } @@ -41,7 +41,7 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, t.Fatalf("expected error with invalid digest: %v", err) } - if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{ + if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ Digest: "", Size: 10, MediaType: "application/octet-stream"}); err == nil { @@ -52,15 +52,15 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, t.Fatalf("expected error checking for cache item with empty digest: %v", err) } - if _, err := cache.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown { + if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty repo: %v", err) } } func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc") + localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ - Digest: "sha256:abc", + Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", Size: 10, MediaType: "application/octet-stream"} diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 51370e17..928ce219 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -385,15 +385,15 @@ func TestLinkPathFuncs(t *testing.T) { }{ { repo: "foo/bar", - digest: "sha256:deadbeaf", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", linkPathFn: blobLinkPath, 
- expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf/link", + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", }, { repo: "foo/bar", - digest: "sha256:deadbeaf", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", linkPathFn: manifestRevisionLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", }, } { p, err := testcase.linkPathFn(testcase.repo, testcase.digest) diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go index 9e91a3fa..238e2f37 100644 --- a/docs/storage/paths_test.go +++ b/docs/storage/paths_test.go @@ -15,31 +15,31 @@ func TestPathMapper(t *testing.T) { { spec: manifestRevisionPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, { spec: manifestRevisionLinkPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: manifestSignatureLinkPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", - signature: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: manifestSignaturesPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures", }, { spec: manifestTagsPathSpec{ @@ -72,17 +72,17 @@ func TestPathMapper(t *testing.T) { spec: manifestTagIndexEntryPathSpec{ name: "foo/bar", tag: "thetag", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, { spec: 
manifestTagIndexEntryLinkPathSpec{ name: "foo/bar", tag: "thetag", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: layerLinkPathSpec{ @@ -93,15 +93,15 @@ func TestPathMapper(t *testing.T) { }, { spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), + digest: digest.Digest("tarsum.dev+sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"), }, - expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", + expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data", }, { spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), + digest: digest.Digest("tarsum.v1+sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"), }, - expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", + expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data", }, { From b596464d382d97b399852432fc2cb31918c230b5 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Fri, 30 Oct 2015 17:08:56 +0100 Subject: [PATCH 328/501] Use bulk delete to remove segments in Swift driver Signed-off-by: Sylvain Baubeau --- docs/storage/driver/swift/swift.go | 49 ++++++++++++------------- docs/storage/driver/swift/swift_test.go | 21 ++++++++++- 2 files changed, 43 insertions(+), 27 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index e0dada31..6d021ea4 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -629,19 +629,6 @@ func (d *driver) Delete(ctx context.Context, path string) error { return err } - if len(objects) > 0 && d.BulkDeleteSupport { - filenames := make([]string, len(objects)) - for i, obj := range objects { - filenames[i] = obj.Name - } - if _, err := d.Conn.BulkDelete(d.Container, filenames); err != swift.Forbidden { - if err == swift.ContainerNotFound { - return storagedriver.PathNotFoundError{Path: path} - } - return err - } - } - for _, obj := range objects { if obj.PseudoDirectory { continue @@ -649,20 +636,12 @@ func (d *driver) Delete(ctx context.Context, path string) error { if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { manifest, ok := headers["X-Object-Manifest"] if ok { - segContainer, prefix := parseManifest(manifest) + _, prefix := parseManifest(manifest) segments, err := d.getAllSegments(prefix) if err != nil { return err } - - for _, s := range segments { - if err := d.Conn.ObjectDelete(segContainer, s.Name); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: s.Name} - } - return err - } - } + objects = append(objects, segments...) 
} } else { if err == swift.ObjectNotFound { @@ -670,13 +649,31 @@ func (d *driver) Delete(ctx context.Context, path string) error { } return err } + } - if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { - if err == swift.ObjectNotFound { - return storagedriver.PathNotFoundError{Path: obj.Name} + if d.BulkDeleteSupport && len(objects) > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj.Name + } + _, err = d.Conn.BulkDelete(d.Container, filenames) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. + if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { + if err == swift.ContainerNotFound { + return storagedriver.PathNotFoundError{Path: path} } return err } + } else { + for _, obj := range objects { + if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { + if err == swift.ObjectNotFound { + return storagedriver.PathNotFoundError{Path: obj.Name} + } + return err + } + } } _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index c4c3333c..b2ff6001 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -134,7 +134,6 @@ func TestEmptyRootList(t *testing.T) { if err != nil { t.Fatalf("unexpected error creating content: %v", err) } - defer rootedDriver.Delete(ctx, filename) keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { @@ -149,4 +148,24 @@ func TestEmptyRootList(t *testing.T) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } + + // Create an object with a path nested under the existing object + err = rootedDriver.PutContent(ctx, filename+"/file1", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + + err = rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to delete: %v", err) + } + + keys, err = rootedDriver.List(ctx, "/") + if err != nil { + t.Fatalf("failed to list objects after deletion: %v", err) + } + + if len(keys) != 0 { + t.Fatal("delete did not remove nested objects") + } } From d6cc32965e0543438d31db47dd5f0dc1280296a6 Mon Sep 17 00:00:00 2001 From: Anton Tiurin Date: Fri, 4 Dec 2015 22:12:32 +0300 Subject: [PATCH 329/501] Fix comment for PathRegexp Signed-off-by: Anton Tiurin --- docs/storage/driver/storagedriver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index cd1c883b..f15d50a9 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -87,7 +87,7 @@ type StorageDriver interface { // PathRegexp is the regular expression which each file path must match. A // file path is absolute, beginning with a slash and containing a positive // number of path components separated by slashes, where each component is -// restricted to lowercase alphanumeric characters or a period, underscore, or +// restricted to alphanumeric characters or a period, underscore, or // hyphen. var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) From fb2142147fbde48cbcba16863e6080b452106ae0 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 7 Dec 2015 10:17:49 -0800 Subject: [PATCH 330/501] Add clearer messaging around missing content-length headers. 
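Previously, a response with no Content-Length header fell through to
strconv.ParseInt(""), which surfaced as an opaque parse error instead
of naming the real problem. A minimal sketch of the before/after
behavior (illustrative only, not part of the diff below):

    lengthHeader := resp.Header.Get("Content-Length")
    if lengthHeader == "" {
        // After: name the actual problem and the request URL.
        return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
    }
    // Before: ParseInt("") failed with the confusing message
    //   error parsing content-length: strconv.ParseInt: parsing "": invalid syntax
    length, err := strconv.ParseInt(lengthHeader, 10, 64)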
Signed-off-by: Richard Scothern 
---
 docs/client/repository.go      |  4 +++
 docs/client/repository_test.go | 53 ++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/docs/client/repository.go b/docs/client/repository.go
index fc709ded..f8bcaaaa 100644
--- a/docs/client/repository.go
+++ b/docs/client/repository.go
@@ -487,6 +487,10 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 	if SuccessStatus(resp.StatusCode) {
 		lengthHeader := resp.Header.Get("Content-Length")
+		if lengthHeader == "" {
+			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
+		}
+
 		length, err := strconv.ParseInt(lengthHeader, 10, 64)
 		if err != nil {
 			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go
index 1e6eb25f..058947de 100644
--- a/docs/client/repository_test.go
+++ b/docs/client/repository_test.go
@@ -159,6 +159,59 @@ func TestBlobFetch(t *testing.T) {
 	// TODO(dmcgowan): Test for unknown blob case
 }
 
+func TestBlobExistsNoContentLength(t *testing.T) {
+	var m testutil.RequestResponseMap
+
+	repo := "biff"
+	dgst, content := newRandomBlob(1024)
+	m = append(m, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "GET",
+			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
+		},
+		Response: testutil.Response{
+			StatusCode: http.StatusOK,
+			Body:       content,
+			Headers: http.Header(map[string][]string{
+				//	"Content-Length": {fmt.Sprint(len(content))},
+				"Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+			}),
+		},
+	})
+
+	m = append(m, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "HEAD",
+			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
+		},
+		Response: testutil.Response{
+			StatusCode: http.StatusOK,
+			Headers: http.Header(map[string][]string{
+				//	"Content-Length": {fmt.Sprint(len(content))},
+				"Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
+			}),
+		},
+	})
+	e, c := testServer(m)
+	defer c()
+
+	ctx := context.Background()
+	r, err := NewRepository(ctx, repo, e, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	l := r.Blobs(ctx)
+
+	_, err = l.Stat(ctx, dgst)
+	if err == nil {
+		t.Fatal("expected error when Content-Length header is missing")
+	}
+	if !strings.Contains(err.Error(), "missing content-length header") {
+		t.Fatalf("Expected missing content-length error message")
+	}
+
+}
+
 func TestBlobExists(t *testing.T) {
 	d1, b1 := newRandomBlob(1024)
 	var m testutil.RequestResponseMap

From ecb84029ecc0efd55d087583d127755755be36db Mon Sep 17 00:00:00 2001
From: Patrick Devine 
Date: Sun, 6 Dec 2015 14:41:38 -0800
Subject: [PATCH 331/501] Make the catalog more efficient

This change removes the sort() from the Repositories() function since
we're now guaranteed to have a lexicographically sorted walk.

Signed-off-by: Patrick Devine 
---
 docs/storage/catalog.go | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/docs/storage/catalog.go b/docs/storage/catalog.go
index b6768012..481489f2 100644
--- a/docs/storage/catalog.go
+++ b/docs/storage/catalog.go
@@ -4,19 +4,22 @@ import (
 	"errors"
 	"io"
 	"path"
-	"sort"
 	"strings"
 
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/registry/storage/driver"
 )
 
+// ErrFinishedWalk is used when the called walk function no longer wants
+// to accept any more values. This is used for pagination when the
+// required number of repos have been found.
+var ErrFinishedWalk = errors.New("finished walk") + // Returns a list, or partial list, of repositories in the registry. // Because it's a quite expensive operation, it should only be used when building up // an initial set of repositories. -func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { var foundRepos []string - var errVal error if len(repos) == 0 { return 0, errors.New("no space in slice") @@ -27,12 +30,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return 0, err } - // Walk each of the directories in our storage. Unfortunately since there's no - // guarantee that storage will return files in lexigraphical order, we have - // to store everything another slice, sort it and then copy it back to our - // passed in slice. - - Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() // lop the base path off @@ -49,17 +47,20 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return ErrSkipDir } + // if we've filled our array, no need to walk any further + if len(foundRepos) == len(repos) { + return ErrFinishedWalk + } + return nil }) - sort.Strings(foundRepos) n = copy(repos, foundRepos) // Signal that we have no more entries by setting EOF - if len(foundRepos) <= len(repos) { + if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { errVal = io.EOF } return n, errVal - } From be2985a35de0e984630d312b99e0af63bd4f3750 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 7 Dec 2015 18:54:22 -0800 Subject: [PATCH 332/501] storage/driver: decrease memory allocation done during testsuite Signed-off-by: Stephen J Day --- docs/storage/driver/testsuites/testsuites.go | 29 ++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index f99df8d9..d798a571 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -1135,12 +1135,19 @@ func randomFilename(length int64) string { return string(b) } -func randomContents(length int64) []byte { - b := make([]byte, length) - for i := range b { - b[i] = byte(rand.Intn(2 << 8)) +// randomBytes pre-allocates all of the memory sizes needed for the test. If +// anything panics while accessing randomBytes, just make this number bigger. 
+var randomBytes = make([]byte, 96<<20)
+
+func init() {
+	// fill randomBytes completely with random data so that tests can
+	// simply slice views of it
+	for i := range randomBytes {
+		randomBytes[i] = byte(rand.Intn(2 << 8))
 	}
-	return b
+}
+
+func randomContents(length int64) []byte {
+	return randomBytes[:length]
 }
 
 type randReader struct {
@@ -1151,14 +1158,14 @@ type randReader struct {
 func (rr *randReader) Read(p []byte) (n int, err error) {
 	rr.m.Lock()
 	defer rr.m.Unlock()
-	for i := 0; i < len(p) && rr.r > 0; i++ {
-		p[i] = byte(rand.Intn(255))
-		n++
-		rr.r--
-	}
-	if rr.r == 0 {
+
+	n = copy(p, randomContents(int64(len(p))))
+	rr.r -= int64(n)
+
+	if rr.r <= 0 {
 		err = io.EOF
 	}
+
 	return
 }

From 4829e9685ecdf72a99d26aa5326fbbc88603262d Mon Sep 17 00:00:00 2001
From: Stephen J Day 
Date: Fri, 13 Nov 2015 13:47:07 -0800
Subject: [PATCH 333/501] registry/storage/driver: checking that non-existent
 path returns PathNotFoundError

Issue #1186 describes a condition where a null tags response is
returned when using the s3 driver. The issue seems to be related to a
missing PathNotFoundError in s3. This change adds a test for that to
get an idea of the lack of compliance across storage drivers. If the
failures are manageable, we'll add this test condition and fix the s3
driver.

Signed-off-by: Stephen J Day 
---
 docs/storage/driver/testsuites/testsuites.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go
index f99df8d9..bb9d289d 100644
--- a/docs/storage/driver/testsuites/testsuites.go
+++ b/docs/storage/driver/testsuites/testsuites.go
@@ -472,6 +472,13 @@ func (suite *DriverSuite) TestList(c *check.C) {
 	rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8)))
 	defer suite.StorageDriver.Delete(suite.ctx, rootDirectory)
 
+	doesnotexist := path.Join(rootDirectory, "nonexistent")
+	_, err := suite.StorageDriver.List(suite.ctx, doesnotexist)
+	c.Assert(err, check.Equals, storagedriver.PathNotFoundError{
+		Path:       doesnotexist,
+		DriverName: suite.StorageDriver.Name(),
+	})
+
 	parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8)))
 	childFiles := make([]string, 50)
 	for i := 0; i < len(childFiles); i++ {

From c46d32bfbb68a5b6b331c0100c4e091e9e5da281 Mon Sep 17 00:00:00 2001
From: Stephen J Day 
Date: Wed, 18 Nov 2015 16:11:44 +0100
Subject: [PATCH 334/501] driver/filesystem: address filesystem driver on
 behavior of List

Signed-off-by: Stephen J Day 
---
 docs/storage/driver/filesystem/driver.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go
index 7dece0b3..480bd687 100644
--- a/docs/storage/driver/filesystem/driver.go
+++ b/docs/storage/driver/filesystem/driver.go
@@ -184,9 +184,6 @@ func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileIn
 // List returns a list of the objects that are direct descendants of the given
 // path.
 func (d *driver) List(ctx context.Context, subPath string) ([]string, error) {
-	if subPath[len(subPath)-1] != '/' {
-		subPath += "/"
-	}
 	fullPath := d.fullPath(subPath)
 
 	dir, err := os.Open(fullPath)

From dc5b71afb032cb2e7dbc5fde869abf4c5f901510 Mon Sep 17 00:00:00 2001
From: Stephen J Day 
Date: Tue, 24 Nov 2015 14:17:25 -0800
Subject: [PATCH 335/501] storage/driver/base: use correct error format style

Signed-off-by: Stephen J Day 
---
 docs/storage/driver/storagedriver.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go
index f15d50a9..dc8bdc8d 100644
--- a/docs/storage/driver/storagedriver.go
+++ b/docs/storage/driver/storagedriver.go
@@ -97,7 +97,7 @@ type ErrUnsupportedMethod struct {
 }
 
 func (err ErrUnsupportedMethod) Error() string {
-	return fmt.Sprintf("[%s] unsupported method", err.DriverName)
+	return fmt.Sprintf("%s: unsupported method", err.DriverName)
 }
 
 // PathNotFoundError is returned when operating on a nonexistent path.
@@ -107,7 +107,7 @@ type PathNotFoundError struct {
 }
 
 func (err PathNotFoundError) Error() string {
-	return fmt.Sprintf("[%s] Path not found: %s", err.DriverName, err.Path)
+	return fmt.Sprintf("%s: path not found: %s", err.DriverName, err.Path)
 }
 
 // InvalidPathError is returned when the provided path is malformed.
@@ -117,7 +117,7 @@ type InvalidPathError struct {
 }
 
 func (err InvalidPathError) Error() string {
-	return fmt.Sprintf("[%s] Invalid path: %s", err.DriverName, err.Path)
+	return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path)
 }
 
 // InvalidOffsetError is returned when attempting to read or write from an
@@ -129,7 +129,7 @@ type InvalidOffsetError struct {
 }
 
 func (err InvalidOffsetError) Error() string {
-	return fmt.Sprintf("[%s] Invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path)
+	return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path)
 }
 
 // Error is a catch-all error type which captures an error string and
@@ -140,5 +140,5 @@ type Error struct {
 }
 
 func (err Error) Error() string {
-	return fmt.Sprintf("[%s] %s", err.DriverName, err.Enclosed)
+	return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed)
 }

From 10f7b7bf95f200630ab23ca1d2a01f48ef22d129 Mon Sep 17 00:00:00 2001
From: Stephen J Day 
Date: Tue, 24 Nov 2015 14:23:12 -0800
Subject: [PATCH 336/501] storage/driver/s3: correct response on list of
 missing directory

Signed-off-by: Stephen J Day 
---
 docs/storage/driver/s3/s3.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go
index 7672fbdb..a9f303dc 100644
--- a/docs/storage/driver/s3/s3.go
+++ b/docs/storage/driver/s3/s3.go
@@ -685,6 +685,12 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 		return nil, err
 	}
 
+	if len(listResponse.Contents) == 0 {
+		// Treat empty response as missing directory, since we don't actually
+		// have directories in s3.
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+
 	files := []string{}
 	directories := []string{}

From c39158d48ca60f99d1274152b53d2f4fa7b4e5b8 Mon Sep 17 00:00:00 2001
From: Vincent Giersch 
Date: Wed, 25 Nov 2015 00:13:36 +0100
Subject: [PATCH 337/501] driver/rados: treat OMAP EIO as a PathNotFoundError

RADOS returns -EIO when trying to read a non-existing OMAP; treat it as
a PathNotFoundError when trying to list a non-existing virtual
directory.
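With this mapping in place, callers get a uniform contract across
backends: a missing directory always surfaces as a
storagedriver.PathNotFoundError. A small sketch of consuming that
contract (illustrative only; assumes d satisfies
storagedriver.StorageDriver):

    entries, err := d.List(ctx, "/docker/registry/v2/repositories")
    if err != nil {
        // Every driver (rados included, after this change) reports a
        // missing directory with the same concrete error type.
        if _, ok := err.(storagedriver.PathNotFoundError); ok {
            entries = nil // treat a missing directory as empty
        } else {
            return nil, err // propagate other failures
        }
    }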
Signed-off-by: Vincent Giersch 
---
 docs/storage/driver/rados/rados.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go
index 29bc3247..c2be528e 100644
--- a/docs/storage/driver/rados/rados.go
+++ b/docs/storage/driver/rados/rados.go
@@ -404,7 +404,7 @@ func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) {
 	files, err := d.listDirectoryOid(dirPath)
 
 	if err != nil {
-		return nil, err
+		return nil, storagedriver.PathNotFoundError{Path: dirPath}
 	}
 
 	keys := make([]string, 0, len(files))

From aa08ced9d73502222ad04931598ef7770e630d6d Mon Sep 17 00:00:00 2001
From: davidli 
Date: Tue, 1 Dec 2015 10:30:14 +0800
Subject: [PATCH 338/501] driver/swift: treat empty object list as a
 PathNotFoundError

Swift returns an empty object list when trying to read a non-existing
object path; treat it as a PathNotFoundError when trying to list a
non-existing virtual directory.

Signed-off-by: David li 
---
 docs/storage/driver/swift/swift.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go
index 6d021ea4..86bce794 100644
--- a/docs/storage/driver/swift/swift.go
+++ b/docs/storage/driver/swift/swift.go
@@ -589,7 +589,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 		files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/")))
 	}
 
-	if err == swift.ContainerNotFound {
+	if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") {
 		return files, storagedriver.PathNotFoundError{Path: path}
 	}
 	return files, err

From 3a5c6446d851d25757ea84ab1f4b1a3ab5609c4b Mon Sep 17 00:00:00 2001
From: Li Yi 
Date: Tue, 1 Dec 2015 17:06:06 +0800
Subject: [PATCH 339/501] Fix for stevvooe:check-storage-drivers-list-path-not-found
 in OSS driver

Change-Id: I5e96fe761d3833c962084fd2d597f47e8a72e7c2
Signed-off-by: Li Yi 
---
 docs/storage/driver/oss/oss.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go
index c16b9949..e9e877a5 100644
--- a/docs/storage/driver/oss/oss.go
+++ b/docs/storage/driver/oss/oss.go
@@ -669,6 +669,12 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 		return nil, err
 	}
 
+	if len(listResponse.Contents) == 0 {
+		// Treat empty response as missing directory, since we don't actually
+		// have directories in OSS.
+		return nil, storagedriver.PathNotFoundError{Path: path}
+	}
+
 	files := []string{}
 	directories := []string{}

From 533c912d3ef08116102acde16d19bf42327b6064 Mon Sep 17 00:00:00 2001
From: Li Yi 
Date: Tue, 8 Dec 2015 19:55:28 +0800
Subject: [PATCH 340/501] Fix the issue for listing root directory

Change-Id: I1c6181fa4e5666bd2e6ec69cb608c4778ae0fe48
Signed-off-by: Li Yi 
---
 docs/storage/driver/oss/oss.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go
index e9e877a5..09b25ef0 100644
--- a/docs/storage/driver/oss/oss.go
+++ b/docs/storage/driver/oss/oss.go
@@ -666,10 +666,10 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 
 	listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax)
 	if err != nil {
-		return nil, err
+		return nil, parseError(path, err)
 	}
 
-	if len(listResponse.Contents) == 0 {
+	if len(listResponse.Contents) == 0 && path != "/" {
 		// Treat empty response as missing directory, since we don't actually
 		// have directories in OSS.
 		return nil, storagedriver.PathNotFoundError{Path: path}

From d38e02c52f493cac9aac6c687e3987b09013d75a Mon Sep 17 00:00:00 2001
From: Kenny Leung 
Date: Tue, 8 Dec 2015 14:24:03 -0800
Subject: [PATCH 341/501] Print error for failed HTTP auth request.

Signed-off-by: Kenny Leung 
---
 docs/client/auth/session.go |  3 ++-
 docs/client/blob_writer.go  |  2 +-
 docs/client/errors.go       |  6 +++++-
 docs/client/repository.go   | 20 ++++++++++----------
 4 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go
index 6c92fc34..8594b66f 100644
--- a/docs/client/auth/session.go
+++ b/docs/client/auth/session.go
@@ -240,7 +240,8 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon
 	defer resp.Body.Close()
 
 	if !client.SuccessStatus(resp.StatusCode) {
-		return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
+		err := client.HandleErrorResponse(resp)
+		return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s: %q", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode), err)
 	}
 
 	decoder := json.NewDecoder(resp.Body)
diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go
index c7eee4e8..21a018dc 100644
--- a/docs/client/blob_writer.go
+++ b/docs/client/blob_writer.go
@@ -33,7 +33,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
 	if resp.StatusCode == http.StatusNotFound {
 		return distribution.ErrBlobUploadUnknown
 	}
-	return handleErrorResponse(resp)
+	return HandleErrorResponse(resp)
 }
 
 func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
diff --git a/docs/client/errors.go b/docs/client/errors.go
index 7305c021..8e3cb108 100644
--- a/docs/client/errors.go
+++ b/docs/client/errors.go
@@ -47,7 +47,11 @@ func parseHTTPErrorResponse(r io.Reader) error {
 	return errors
 }
 
-func handleErrorResponse(resp *http.Response) error {
+// HandleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of this
+// range.
+func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { diff --git a/docs/client/repository.go b/docs/client/repository.go index bb10ece7..421584ad 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -91,7 +91,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri returnErr = io.EOF } } else { - return 0, handleErrorResponse(resp) + return 0, HandleErrorResponse(resp) } return numFilled, returnErr @@ -213,7 +213,7 @@ func (ms *manifests) Tags() ([]string, error) { return tagsResponse.Tags, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { @@ -238,7 +238,7 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { } else if resp.StatusCode == http.StatusNotFound { return false, nil } - return false, handleErrorResponse(resp) + return false, HandleErrorResponse(resp) } func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { @@ -297,7 +297,7 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic } return &sm, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } func (ms *manifests) Put(m *schema1.SignedManifest) error { @@ -323,7 +323,7 @@ func (ms *manifests) Put(m *schema1.SignedManifest) error { // TODO(dmcgowan): make use of digest header return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (ms *manifests) Delete(dgst digest.Digest) error { @@ -345,7 +345,7 @@ func (ms *manifests) Delete(dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } type blobs struct { @@ -401,7 +401,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) }), nil } @@ -457,7 +457,7 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { location: location, }, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -505,7 +505,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -542,7 +542,7 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { From d68acc869e89b7e54369c6bf13ff6b520783e927 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 8 Dec 2015 11:02:40 -0800 Subject: [PATCH 342/501] storage/driver/s3: adjust s3 driver to return unmunged path This fixes both the s3 driver and the oss driver to return the unmunged path when returning errors. 
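The drivers normalize the path (appending a trailing slash) before
querying the backend, but errors must carry the path exactly as the
caller passed it; otherwise error values will not compare equal to
what callers and the test suite construct. A condensed sketch of the
pattern, using the names from the diff below:

    // List keeps the caller's path (opath) for error reporting and
    // munges a working copy (path) for the backend query.
    func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
        path := opath
        if path != "/" && path[len(path)-1] != '/' {
            path = path + "/" // the backend expects directory-style keys
        }
        listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
        if err != nil {
            return nil, parseError(opath, err) // report the unmunged path
        }
        // ...
    }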
Signed-off-by: Stephen J Day 
---
 docs/storage/driver/oss/oss.go | 21 ++++++++++++---------
 docs/storage/driver/s3/s3.go   | 19 +++++++++++--------
 2 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go
index 09b25ef0..c6e4f8a3 100644
--- a/docs/storage/driver/oss/oss.go
+++ b/docs/storage/driver/oss/oss.go
@@ -651,8 +651,9 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
 }
 
 // List returns a list of the objects that are direct descendants of the given path.
-func (d *driver) List(ctx context.Context, path string) ([]string, error) {
-	if path != "/" && path[len(path)-1] != '/' {
+func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
+	path := opath
+	if path != "/" && opath[len(path)-1] != '/' {
 		path = path + "/"
 	}
 
@@ -666,13 +667,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 
 	listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax)
 	if err != nil {
-		return nil, parseError(path, err)
-	}
-
-	if len(listResponse.Contents) == 0 && path != "/" {
-		// Treat empty response as missing directory, since we don't actually
-		// have directories in OSS.
-		return nil, storagedriver.PathNotFoundError{Path: path}
+		return nil, parseError(opath, err)
 	}
 
 	files := []string{}
@@ -697,6 +692,14 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 		}
 	}
 
+	if opath != "/" {
+		if len(files) == 0 && len(directories) == 0 {
+			// Treat empty response as missing directory, since we don't actually
+			// have directories in OSS.
+			return nil, storagedriver.PathNotFoundError{Path: opath}
+		}
+	}
+
 	return append(files, directories...), nil
 }

diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go
index a9f303dc..7bb23a85 100644
--- a/docs/storage/driver/s3/s3.go
+++ b/docs/storage/driver/s3/s3.go
@@ -667,7 +667,8 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
 }
 
 // List returns a list of the objects that are direct descendants of the given path.
-func (d *driver) List(ctx context.Context, path string) ([]string, error) {
+func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
+	path := opath
 	if path != "/" && path[len(path)-1] != '/' {
 		path = path + "/"
 	}
@@ -682,13 +683,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 
 	listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax)
 	if err != nil {
-		return nil, err
-	}
-
-	if len(listResponse.Contents) == 0 {
-		// Treat empty response as missing directory, since we don't actually
-		// have directories in s3.
-		return nil, storagedriver.PathNotFoundError{Path: path}
+		return nil, parseError(opath, err)
 	}
 
 	files := []string{}
 	directories := []string{}
@@ -713,6 +708,14 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
 		}
 	}
 
+	if opath != "/" {
+		if len(files) == 0 && len(directories) == 0 {
+			// Treat empty response as missing directory, since we don't actually
+			// have directories in s3.
+ return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + return append(files, directories...), nil } From cfd2f039209d43036f46de0b6ef8d7462766d598 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Sun, 25 Oct 2015 11:01:15 +0800 Subject: [PATCH 343/501] Support large layer for OSS driver Signed-off-by: Li Yi --- docs/storage/driver/oss/oss.go | 67 +++++++++------------------------- 1 file changed, 17 insertions(+), 50 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index c6e4f8a3..4dfe5675 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -39,6 +39,7 @@ const driverName = "oss" const minChunkSize = 5 << 20 const defaultChunkSize = 2 * minChunkSize +const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk // listMax is the largest amount of objects you can request from OSS in a list call const listMax = 1000 @@ -195,13 +196,14 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return New(params) } -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and // bucketName func New(params DriverParameters) (*Driver, error) { client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) client.SetEndpoint(params.Endpoint) bucket := client.Bucket(params.Bucket) + client.SetDebug(false) // Validate that the given credentials have at least read permissions in the // given bucket scope. @@ -403,35 +405,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea var err error var part oss.Part - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the OSS package does not. We may add oss - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to only triggered when a tcp - // connection to OSS slows to a crawl. If the RequestTimeout - // ends up getting added to the OSS library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *oss.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. 
- default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) - time.Sleep(backoff) - } + part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout) if err != nil { logrus.Errorf("error putting part, aborting: %v", err) @@ -456,7 +430,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if offset > 0 { resp, err := d.Bucket.Head(d.ossPath(path), nil) if err != nil { - if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != 404 { return 0, err } } @@ -511,7 +485,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea fromZeroFillLarge := func(from, to int64) error { bytesRead64 := int64(0) for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout) if err != nil { return err } @@ -553,7 +527,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea return totalRead, err } - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout) if err != nil { return totalRead, err } @@ -706,15 +680,14 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath)) - /* This is terrible, but aws doesn't have an actual move. 
*/
-	_, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(),
-		oss.CopyOptions{
-		//Options:     d.getOptions(),
-		//ContentType: d.getContentType()
-		},
-		d.Bucket.Path(d.ossPath(sourcePath)))
+	logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath))
+
+	err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath),
+		d.getContentType(),
+		getPermissions(),
+		oss.Options{})
 	if err != nil {
+		logrus.Errorf("Failed to move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err)
 		return parseError(sourcePath, err)
 	}
@@ -756,13 +729,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
 	method, ok := options["method"]
 	if ok {
 		methodString, ok = method.(string)
-		if !ok || (methodString != "GET" && methodString != "HEAD") {
-			return "", storagedriver.ErrUnsupportedMethod{}
+		if !ok || (methodString != "GET" && methodString != "PUT") {
+			return "", storagedriver.ErrUnsupportedMethod{driverName}
 		}
 	}
 
 	expiresTime := time.Now().Add(20 * time.Minute)
-	logrus.Infof("expiresTime: %d", expiresTime)
 
 	expires, ok := options["expiry"]
 	if ok {
@@ -771,7 +743,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
 			expiresTime = et
 		}
 	}
-	logrus.Infof("expiresTime: %d", expiresTime)
+	logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime)
 	testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
 	logrus.Infof("testURL: %s", testURL)
 	return testURL, nil
 }
 
 func (d *driver) ossPath(path string) string {
 	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
 }
 
-// S3BucketKey returns the OSS bucket key for the given storage driver path.
-func (d *Driver) S3BucketKey(path string) string {
-	return d.StorageDriver.(*driver).ossPath(path)
-}
-
 func parseError(path string, err error) error {
 	if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" {
 		return storagedriver.PathNotFoundError{Path: path}

From 4ebaacfcdae9ebcdb02571f88047d4d0efaf89b1 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann 
Date: Fri, 11 Dec 2015 15:13:03 -0800
Subject: [PATCH 344/501] Remove unnecessary stat from blob Get method

This calls Stat before Open, which should be unnecessary because Open
can handle the case of a nonexistent blob. Removing the Stat saves a
round trip. This is similar to the removal of stat in Open in #1226.

Signed-off-by: Aaron Lehmann 
---
 docs/client/repository.go | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/docs/client/repository.go b/docs/client/repository.go
index bb10ece7..8f525f0d 100644
--- a/docs/client/repository.go
+++ b/docs/client/repository.go
@@ -377,11 +377,7 @@ func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Des
 }
 
 func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
-	desc, err := bs.Stat(ctx, dgst)
-	if err != nil {
-		return nil, err
-	}
-	reader, err := bs.Open(ctx, desc.Digest)
+	reader, err := bs.Open(ctx, dgst)
 	if err != nil {
 		return nil, err
 	}

From 58232e50cf60edfa6d4014b2e8399c4049759b98 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann 
Date: Mon, 14 Dec 2015 14:30:51 -0800
Subject: [PATCH 345/501] Simplify digest.FromBytes calling convention

The current implementation of digest.FromBytes returns an error. This
error can never be non-nil, but its presence in the function signature
means each call site needs error handling code for an error that is
always nil.
I verified that none of the hash.Hash implementations in the standard library can return an error on Write. Nor can any of the hash.Hash implementations vendored in distribution. This commit changes digest.FromBytes not to return an error. If Write returns an error, it will panic, but as discussed above, this should never happen. This commit also avoids using a bytes.Reader to feed data into the hash function in FromBytes. This makes the hypothetical case that would panic a bit more explicit, and should also be more performant. Signed-off-by: Aaron Lehmann --- docs/client/repository_test.go | 16 +++------------- docs/handlers/api_test.go | 6 ++---- docs/handlers/images.go | 8 +------- docs/proxy/proxyblobstore_test.go | 10 ++-------- docs/proxy/proxymanifeststore.go | 7 +------ docs/proxy/proxymanifeststore_test.go | 2 +- docs/storage/blob_test.go | 5 +---- docs/storage/blobstore.go | 7 +------ docs/storage/linkedblobstore.go | 5 +---- docs/storage/manifeststore_test.go | 6 +----- 10 files changed, 14 insertions(+), 58 deletions(-) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 058947de..a001b62f 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -38,12 +38,7 @@ func newRandomBlob(size int) (digest.Digest, []byte) { panic("unable to read enough bytes") } - dgst, err := digest.FromBytes(b) - if err != nil { - panic(err) - } - - return dgst, b + return digest.FromBytes(b), b } func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { @@ -509,16 +504,11 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed panic(err) } - dgst, err := digest.FromBytes(p) - if err != nil { - panic(err) - } - - return sm, dgst, p + return sm, digest.FromBytes(p), p } func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { - actualDigest, _ := digest.FromBytes(content) + actualDigest := digest.FromBytes(content) getReqWithEtag := testutil.Request{ Method: "GET", Route: "/v2/" + repo + "/manifests/" + reference, diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 7f52d13d..8dbec0fe 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -880,8 +880,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m payload, err := signedManifest.Payload() checkErr(t, err, "getting manifest payload") - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") + dgst := digest.FromBytes(payload) args.signedManifest = signedManifest args.dgst = dgst @@ -1487,8 +1486,7 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) payload, err := signedManifest.Payload() checkErr(t, err, "getting manifest payload") - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") + dgst := digest.FromBytes(payload) manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) checkErr(t, err, "building manifest url") diff --git a/docs/handlers/images.go b/docs/handlers/images.go index d30fce26..2ec51b99 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -250,11 +250,5 @@ func digestManifest(ctx context.Context, sm *schema1.SignedManifest) (digest.Dig p = sm.Raw } - dgst, err := digest.FromBytes(p) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error digesting manifest: %v", err) - return "", err - } - - return dgst, err + return digest.FromBytes(p), nil } diff --git 
a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index a88fd8b3..eb623197 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -298,10 +298,7 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { } bodyBytes := w.Body.Bytes() - localDigest, err := digest.FromBytes(bodyBytes) - if err != nil { - t.Fatalf("Error making digest from blob") - } + localDigest := digest.FromBytes(bodyBytes) if localDigest != remoteBlob.Digest { t.Fatalf("Mismatching blob fetch from proxy") } @@ -335,10 +332,7 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { t.Fatalf(err.Error()) } - dl, err := digest.FromBytes(w.Body.Bytes()) - if err != nil { - t.Fatalf("Error making digest from blob") - } + dl := digest.FromBytes(w.Body.Bytes()) if dl != dr.Digest { t.Errorf("Mismatching blob fetch from proxy") } diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 610d695e..1e9e24de 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -137,12 +137,7 @@ func manifestDigest(sm *schema1.SignedManifest) (digest.Digest, error) { } - dgst, err := digest.FromBytes(payload) - if err != nil { - return "", err - } - - return dgst, nil + return digest.FromBytes(payload), nil } func (pms proxyManifestStore) Put(manifest *schema1.SignedManifest) error { diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 6e0fc51e..a5a0a21b 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -177,7 +177,7 @@ func populateRepo(t *testing.T, ctx context.Context, repository distribution.Rep if err != nil { t.Fatal(err) } - return digest.FromBytes(pl) + return digest.FromBytes(pl), nil } // TestProxyManifests contains basic acceptance tests diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index c84c7432..ab533bd6 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -176,10 +176,7 @@ func TestSimpleBlobUpload(t *testing.T) { if err != nil { t.Fatalf("Error reading all of blob %s", err.Error()) } - expectedDigest, err := digest.FromBytes(randomBlob) - if err != nil { - t.Fatalf("Error getting digest from bytes: %s", err) - } + expectedDigest := digest.FromBytes(randomBlob) simpleUpload(t, bs, randomBlob, expectedDigest) d, err = bs.Stat(ctx, expectedDigest) diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index f6a8ac43..f8fe23fe 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -56,12 +56,7 @@ func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution // content is already present, only the digest will be returned. This should // only be used for small objects, such as manifests. 
This is implemented as a
// convenience for other Put implementations
 func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
-	dgst, err := digest.FromBytes(p)
-	if err != nil {
-		context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p))
-		return distribution.Descriptor{}, err
-	}
-
+	dgst := digest.FromBytes(p)
 	desc, err := bs.statter.Stat(ctx, dgst)
 	if err == nil {
 		// content already present
diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index f01088ba..25640367 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -75,10 +75,7 @@ func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter
 }
 
 func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
-	dgst, err := digest.FromBytes(p)
-	if err != nil {
-		return distribution.Descriptor{}, err
-	}
+	dgst := digest.FromBytes(p)
 	// Place the data in the blob store first.
 	desc, err := lbs.blobStore.Put(ctx, mediaType, p)
 	if err != nil {
diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go
index 928ce219..de31b364 100644
--- a/docs/storage/manifeststore_test.go
+++ b/docs/storage/manifeststore_test.go
@@ -185,11 +185,7 @@ func TestManifestStorage(t *testing.T) {
 
 	// Now that we have a payload, take a moment to check that the manifest is
 	// returned by the payload digest.
-	dgst, err := digest.FromBytes(payload)
-	if err != nil {
-		t.Fatalf("error getting manifest digest: %v", err)
-	}
-
+	dgst := digest.FromBytes(payload)
 	exists, err = ms.Exists(dgst)
 	if err != nil {
 		t.Fatalf("error checking manifest existence by digest: %v", err)

From a077202f8853c9d81d14f94279d7c1e4fc19ce69 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann 
Date: Tue, 15 Dec 2015 17:18:13 -0800
Subject: [PATCH 346/501] Remove tarsum support for digest package

tarsum is not actually used by the registry. Remove support for it.

Convert numerous uses in unit tests to SHA256.

Update docs to remove mentions of tarsums (which were often
inaccurate).

Remove tarsum dependency.
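For reference, a small runnable sketch of the digest forms involved
(the input bytes and printed hex value are illustrative only):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/digest"
    )

    func main() {
        // Old test fixtures used tarsum-flavored digests such as
        //   tarsum.v1+sha256:abcdef0123456789...
        // After this change, everything is a plain algorithm:hex digest.
        dgst := digest.FromBytes([]byte("some layer content"))
        fmt.Println(dgst.Algorithm()) // sha256
        fmt.Println(dgst)             // sha256:<64 hex characters>
    }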
Signed-off-by: Aaron Lehmann --- docs/api/v2/routes_test.go | 8 -------- docs/api/v2/urls_test.go | 12 ++++++------ docs/handlers/api_test.go | 22 ++++++++-------------- docs/handlers/app_test.go | 7 ------- docs/storage/blob_test.go | 13 +++---------- docs/storage/blobwriter.go | 2 +- docs/storage/cache/redis/redis.go | 2 +- docs/storage/linkedblobstore.go | 2 +- docs/storage/paths.go | 23 ++--------------------- docs/storage/paths_test.go | 21 --------------------- 10 files changed, 22 insertions(+), 90 deletions(-) diff --git a/docs/api/v2/routes_test.go b/docs/api/v2/routes_test.go index f6379977..f632d981 100644 --- a/docs/api/v2/routes_test.go +++ b/docs/api/v2/routes_test.go @@ -87,14 +87,6 @@ func TestRouter(t *testing.T) { "name": "docker.com/foo/bar/baz", }, }, - { - RouteName: RouteNameBlob, - RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", - Vars: map[string]string{ - "name": "foo/bar", - "digest": "tarsum.dev+foo:abcdef0919234", - }, - }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index fdcfc31a..16e05695 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -35,9 +35,9 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { }, { description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", build: func() (string, error) { - return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + return urlBuilder.BuildBlobURL("foo/bar", "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") }, }, { @@ -49,11 +49,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { }, { description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, @@ -66,11 +66,11 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { }, { description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 8dbec0fe..7b7c3c0d 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -251,22 +251,18 @@ type blobArgs struct { imageName string layerFile io.ReadSeeker layerDigest digest.Digest - tarSumStr string } func makeBlobArgs(t *testing.T) blobArgs { - layerFile, tarSumStr, err := 
testutil.CreateRandomTarFile()
+	layerFile, layerDigest, err := testutil.CreateRandomTarFile()
 	if err != nil {
 		t.Fatalf("error creating random layer file: %v", err)
 	}
 
-	layerDigest := digest.Digest(tarSumStr)
-
 	args := blobArgs{
 		imageName:   "foo/bar",
 		layerFile:   layerFile,
 		layerDigest: layerDigest,
-		tarSumStr:   tarSumStr,
 	}
 	return args
 }
@@ -393,7 +389,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 
 	// -----------------------------------------
 	// Do layer push with an empty body and correct digest
-	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
+	zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{}))
 	if err != nil {
 		t.Fatalf("unexpected error digesting empty buffer: %v", err)
 	}
@@ -406,7 +402,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 
 	// This is a valid but empty tarfile!
 	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
-	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
+	emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar))
 	if err != nil {
 		t.Fatalf("unexpected error digesting empty tar: %v", err)
 	}
@@ -476,7 +472,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 
 	// ----------------
 	// Fetch the layer with an invalid digest
-	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
+	badURL := strings.Replace(layerURL, "sha256", "sha257", 1)
 	resp, err = http.Get(badURL)
 	if err != nil {
 		t.Fatalf("unexpected error fetching layer: %v", err)
 	}
@@ -523,7 +519,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {
 	checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK)
 
 	// Missing tests:
-	// 	- Upload the same tarsum file under and different repository and
+	// 	- Upload the same tar file under a different repository and
 	//       ensure the content remains uncorrupted.
return env } @@ -570,7 +566,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { // ---------------- // Attempt to delete a layer with an invalid digest - badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) resp, err = httpDelete(badURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) @@ -612,12 +608,11 @@ func TestDeleteDisabled(t *testing.T) { imageName := "foo/bar" // "build" our layer file - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } - layerDigest := digest.Digest(tarSumStr) layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) if err != nil { t.Fatalf("Error building blob URL") @@ -638,12 +633,11 @@ func TestDeleteReadOnly(t *testing.T) { imageName := "foo/bar" // "build" our layer file - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } - layerDigest := digest.Digest(tarSumStr) layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) if err != nil { t.Fatalf("Error building blob URL") diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 9e2514d8..de27f443 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -102,13 +102,6 @@ func TestAppDispatcher(t *testing.T) { "name", "foo/bar", }, }, - { - endpoint: v2.RouteNameBlob, - vars: []string{ - "name", "foo/bar", - "digest", "tarsum.v1+bogus:abcdef0123456789", - }, - }, { endpoint: v2.RouteNameBlobUpload, vars: []string{ diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index ab533bd6..c6cfbcda 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -20,16 +20,11 @@ import ( // TestSimpleBlobUpload covers the blob upload process, exercising common // error paths that might be seen during an upload. func TestSimpleBlobUpload(t *testing.T) { - randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() + randomDataReader, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random reader: %v", err) } - dgst := digest.Digest(tarSumStr) - if err != nil { - t.Fatalf("error allocating upload store: %v", err) - } - ctx := context.Background() imageName := "foo/bar" driver := inmemory.New() @@ -225,13 +220,11 @@ func TestSimpleBlobRead(t *testing.T) { } bs := repository.Blobs(ctx) - randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. + randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. if err != nil { t.Fatalf("error creating random data: %v", err) } - dgst := digest.Digest(tarSumStr) - // Test for existence. 
 desc, err := bs.Stat(ctx, dgst)
 
 	if err != distribution.ErrBlobUnknown {
@@ -358,7 +351,7 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec
 
 	if dgst != expectedDigest {
 		// sanity check on zero digest
-		t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar)
+		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
 	}
 
 	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go
index 3453a57a..37903176 100644
--- a/docs/storage/blobwriter.go
+++ b/docs/storage/blobwriter.go
@@ -302,7 +302,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 		// get a hash, then the underlying file is deleted, we risk moving
 		// a zero-length blob into a nonzero-length blob location. To
 		// prevent this horrid thing, we employ the hack of only allowing
-		// to this happen for the zero tarsum.
+		// this to happen for the digest of an empty tar.
 		if desc.Digest == digest.DigestSha256EmptyTar {
 			return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
 		}
diff --git a/docs/storage/cache/redis/redis.go b/docs/storage/cache/redis/redis.go
index 1736756e..cb264b09 100644
--- a/docs/storage/cache/redis/redis.go
+++ b/docs/storage/cache/redis/redis.go
@@ -249,7 +249,7 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx cont
 	}
 
 	// Also set the values for the primary descriptor, if they differ by
-	// algorithm (ie sha256 vs tarsum).
+	// algorithm (ie sha256 vs sha512).
 	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
 		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
 			return err
diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index 25640367..430da1ca 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -282,7 +282,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 	}
 
 	if target != dgst {
-		// Track when we are doing cross-digest domain lookups. ie, tarsum to sha256.
+		// Track when we are doing cross-digest domain lookups. ie, sha512 to sha256.
 		context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
 	}
diff --git a/docs/storage/paths.go b/docs/storage/paths.go
index e90a1993..4d2d48c1 100644
--- a/docs/storage/paths.go
+++ b/docs/storage/paths.go
@@ -396,9 +396,8 @@ type layerLinkPathSpec struct {
 func (layerLinkPathSpec) pathSpec() {}
 
 // blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
-// should be "safe" before getting this far due to strict digest requirements
-// but we can add further path conversion here, if needed.
+// input. Paths should be "safe" before getting this far due to strict digest
+// requirements but we can add further path conversion here, if needed.
 var blobAlgorithmReplacer = strings.NewReplacer(
 	"+", "/",
 	".", "/",
@@ -468,10 +467,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
 //
 // 	/
 //
-// Most importantly, for tarsum, the layout looks like this:
-//
-// 	tarsum///
-//
 // If multilevel is true, the first two bytes of the digest will separate
 // groups of digest folders. It will be as follows:
@@ -494,19 +489,5 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 
 	suffix = append(suffix, hex)
 
-	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
-		// We have a tarsum!
-		version := tsi.Version
-		if version == "" {
-			version = "v0"
-		}
-
-		prefix = []string{
-			"tarsum",
-			version,
-			tsi.Algorithm,
-		}
-	}
-
 	return append(prefix, suffix...), nil
 }
diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go
index 238e2f37..2ad78e9d 100644
--- a/docs/storage/paths_test.go
+++ b/docs/storage/paths_test.go
@@ -2,8 +2,6 @@ package storage
 
 import (
 	"testing"
-
-	"github.com/docker/distribution/digest"
 )
 
 func TestPathMapper(t *testing.T) {
@@ -84,25 +82,6 @@ func TestPathMapper(t *testing.T) {
 			},
 			expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link",
 		},
-		{
-			spec: layerLinkPathSpec{
-				name:   "foo/bar",
-				digest: "tarsum.v1+test:abcdef",
-			},
-			expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.dev+sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
-		{
-			spec: blobDataPathSpec{
-				digest: digest.Digest("tarsum.v1+sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"),
-			},
-			expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data",
-		},
 		{
 			spec: uploadDataPathSpec{

From 8efb9ca329dc96191d80ba644959880d9dd88460 Mon Sep 17 00:00:00 2001
From: Richard Scothern 
Date: Thu, 20 Aug 2015 21:50:15 -0700
Subject: [PATCH 347/501] Implementation of the Manifest Service API refactor.

Add a generic Manifest interface to represent manifests in the registry
and remove references to schema-specific manifests.

Add a ManifestBuilder to construct Manifest objects. Concrete manifest
builders will exist for each manifest type and implementations will
contain manifest-specific data used to build a manifest.

Remove Signatures() from the Repository interface. Signatures are
relevant only to schema1 manifests. Move access to the signature store
inside the schema1 manifestStore. Add some API tests to verify
signature roundtripping.

schema1
-------

Change the way data is stored in schema1.Manifest to enable Payload()
to be used to return complete Manifest JSON from the HTTP handler
without knowledge of the schema1 protocol.

tags
----

Move tag functionality to a separate TagService and update
ManifestService to use the new interfaces. Implement a driver-based
tagService to be backward compatible with the current tag service.

Add a proxyTagService to enable the registry to get a digest for remote
manifests from a tag.

manifest store
--------------

Remove the revision store and move all signing functionality into the
signed manifeststore.

manifest registration
---------------------

Add a mechanism to register manifest media types and to allow different
manifest types to be Unmarshalled correctly.

client
------

Add ManifestServiceOptions to client functions to allow tags to be
passed into Put and Get for building correct registry URLs.
Change functional arguments to be an interface type to allow passing data without mutating shared state. Signed-off-by: Richard Scothern Signed-off-by: Richard Scothern --- docs/api/v2/descriptors.go | 2 +- docs/client/repository.go | 324 +++++++++++++++++++------- docs/client/repository_test.go | 125 ++++++---- docs/handlers/api_test.go | 149 ++++++++++-- docs/handlers/images.go | 114 ++++----- docs/handlers/tags.go | 8 +- docs/proxy/proxymanifeststore.go | 137 +++-------- docs/proxy/proxymanifeststore_test.go | 64 +++-- docs/proxy/proxyregistry.go | 23 +- docs/proxy/proxytagservice.go | 58 +++++ docs/proxy/proxytagservice_test.go | 164 +++++++++++++ docs/storage/manifeststore.go | 159 +++++++++---- docs/storage/manifeststore_test.go | 100 ++++---- docs/storage/registry.go | 54 ++--- docs/storage/revisionstore.go | 111 --------- docs/storage/signaturestore.go | 11 - docs/storage/tagstore.go | 64 ++--- docs/storage/tagstore_test.go | 150 ++++++++++++ 18 files changed, 1161 insertions(+), 656 deletions(-) create mode 100644 docs/proxy/proxytagservice.go create mode 100644 docs/proxy/proxytagservice_test.go delete mode 100644 docs/storage/revisionstore.go create mode 100644 docs/storage/tagstore_test.go diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 7eba362a..52c725dc 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -495,7 +495,7 @@ var routeDescriptors = []RouteDescriptor{ Methods: []MethodDescriptor{ { Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ diff --git a/docs/client/repository.go b/docs/client/repository.go index bb10ece7..c609cb0a 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -3,6 +3,7 @@ package client import ( "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -14,7 +15,6 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" @@ -156,26 +156,139 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani }, nil } -func (r *repository) Signatures() distribution.SignatureService { - ms, _ := r.Manifests(r.context) - return &signatures{ - manifests: ms, +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Name(), } } -type signatures struct { - manifests distribution.ManifestService +// tags implements remote tagging operations. 
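// Editor's note: an illustrative sketch of the client-side TagService
// surface introduced below — not part of this patch. Repository construction
// follows NewRepository as used in this package's tests; serverURL is a
// placeholder, not a name from the patch.
//
//	ctx := context.Background()
//	repo, err := NewRepository(ctx, "test.example.com/repo", serverURL, nil)
//	if err != nil {
//		// handle error
//	}
//	tagSvc := repo.Tags(ctx)
//	all, _ := tagSvc.All(ctx)            // GET /v2/<name>/tags/list
//	desc, _ := tagSvc.Get(ctx, "latest") // HEAD (or GET) /v2/<name>/manifests/latest
//	_, _ = all, desc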
+type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name string } -func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { - m, err := s.manifests.Get(dgst) +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) if err != nil { - return nil, err + return tags, err } - return m.Signatures() + + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = tagsResponse.Tags + return tags, nil + } + return tags, handleErrorResponse(resp) } -func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. 
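// Editor's note: a hedged sketch of the retry flow implemented below — the
// descriptor is assembled from the Content-Type, Docker-Content-Digest and
// Content-Length response headers (see descriptorFromResponse above).
// manifestURL and httpClient are placeholder names, not part of this patch.
//
//	resp, err := httpClient.Head(manifestURL)
//	if err == nil && resp.StatusCode == http.StatusMethodNotAllowed {
//		// the registry predates HEAD support; fall back to a full GET
//		resp, err = httpClient.Get(manifestURL)
//	}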
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + u, err := t.ub.BuildManifestURL(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + var attempts int + resp, err := t.client.Head(u) + +check: + if err != nil { + return distribution.Descriptor{}, err + } + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + case resp.StatusCode == http.StatusMethodNotAllowed: + resp, err = t.client.Get(u) + attempts++ + if attempts > 1 { + return distribution.Descriptor{}, err + } + goto check + default: + return distribution.Descriptor{}, handleErrorResponse(resp) + } +} + +func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + panic("not implemented") +} + +func (t *tags) Untag(ctx context.Context, tag string) error { panic("not implemented") } @@ -186,44 +299,8 @@ type manifests struct { etags map[string]string } -func (ms *manifests) Tags() ([]string, error) { - u, err := ms.ub.BuildTagsURL(ms.name) - if err != nil { - return nil, err - } - - resp, err := ms.client.Get(u) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return nil, err - } - - return tagsResponse.Tags, nil - } - return nil, handleErrorResponse(resp) -} - -func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { - // Call by Tag endpoint since the API uses the same - // URL endpoint for tags and digests. - return ms.ExistsByTag(dgst.String()) -} - -func (ms *manifests) ExistsByTag(tag string) (bool, error) { - u, err := ms.ub.BuildManifestURL(ms.name, tag) +func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) if err != nil { return false, err } @@ -241,46 +318,63 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { return false, handleErrorResponse(resp) } -func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { - // Call by Tag endpoint since the API uses the same - // URL endpoint for tags and digests. - return ms.GetByTag(dgst.String()) -} - -// AddEtagToTag allows a client to supply an eTag to GetByTag which will be +// AddEtagToTag allows a client to supply an eTag to Get which will be // used for a conditional HTTP request. If the eTag matches, a nil manifest -// and nil error will be returned. etag is automatically quoted when added to -// this map. +// and ErrManifestNotModified error will be returned. etag is automatically +// quoted when added to this map. 
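// Editor's note: at the HTTP level the option below amounts to a conditional
// GET — an illustrative sketch, with manifestURL and httpClient as
// placeholder names:
//
//	req, _ := http.NewRequest("GET", manifestURL, nil)
//	req.Header.Set("If-None-Match", fmt.Sprintf(`"%s"`, dgst))
//	resp, _ := httpClient.Do(req)
//	// a 304 Not Modified response surfaces as
//	// distribution.ErrManifestNotModified from Get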
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return func(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") - } + return etagOption{tag, etag} } -func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { +type etagOption struct{ tag, etag string } + +func (o etagOption) Apply(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) + return nil + } + return fmt.Errorf("etag options is a client-only option") +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + + var tag string for _, option := range options { - err := option(ms) - if err != nil { - return nil, err + if opt, ok := option.(withTagOption); ok { + tag = opt.tag + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } } } - u, err := ms.ub.BuildManifestURL(ms.name, tag) + var ref string + if tag != "" { + ref = tag + } else { + ref = dgst.String() + } + + u, err := ms.ub.BuildManifestURL(ms.name, ref) if err != nil { return nil, err } + req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, err } - if _, ok := ms.etags[tag]; ok { - req.Header.Set("If-None-Match", ms.etags[tag]) + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) } + + if _, ok := ms.etags[ref]; ok { + req.Header.Set("If-None-Match", ms.etags[ref]) + } + resp, err := ms.client.Do(req) if err != nil { return nil, err @@ -289,44 +383,89 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { - var sm schema1.SignedManifest - decoder := json.NewDecoder(resp.Body) + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) - if err := decoder.Decode(&sm); err != nil { + if err != nil { return nil, err } - return &sm, nil + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil } return nil, handleErrorResponse(resp) } -func (ms *manifests) Put(m *schema1.SignedManifest) error { - manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) - if err != nil { - return err +// WithTag allows a tag to be passed into Put which enables the client +// to build a correct URL. +func WithTag(tag string) distribution.ManifestServiceOption { + return withTagOption{tag} +} + +type withTagOption struct{ tag string } + +func (o withTagOption) Apply(m distribution.ManifestService) error { + if _, ok := m.(*manifests); ok { + return nil + } + return fmt.Errorf("withTagOption is a client-only option") +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. This state is written and read under a lock. 
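// Editor's note: a minimal usage sketch for the new Put signature, mirroring
// this patch's tests — not a definitive example. The returned digest is
// parsed from the server's Docker-Content-Digest response header:
//
//	dgst, err := ms.Put(ctx, signedManifest, WithTag("latest"))
//	if err != nil {
//		// handle error
//	}
//	_ = dgst // content-addressable reference for the pushed manifest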
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var tag string + + for _, option := range options { + if opt, ok := option.(withTagOption); ok { + tag = opt.tag + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } } - // todo(richardscothern): do something with options here when they become applicable - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) + manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag) if err != nil { - return err + return "", err } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + resp, err := ms.client.Do(putRequest) if err != nil { - return err + return "", err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { - // TODO(dmcgowan): make use of digest header - return nil + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil } - return handleErrorResponse(resp) + + return "", handleErrorResponse(resp) } -func (ms *manifests) Delete(dgst digest.Digest) error { +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) if err != nil { return err @@ -348,6 +487,11 @@ func (ms *manifests) Delete(dgst digest.Digest) error { return handleErrorResponse(resp) } +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + type blobs struct { name string ub *v2.URLBuilder diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index a001b62f..c1032ec1 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -42,7 +42,6 @@ func newRandomBlob(size int) (digest.Digest, []byte) { } func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", @@ -499,12 +498,7 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed panic(err) } - p, err := sm.Payload() - if err != nil { - panic(err) - } - - return sm, digest.FromBytes(p), p + return sm, digest.FromBytes(sm.Canonical), sm.Canonical } func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { @@ -525,6 +519,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeManifest}, }), } } else { @@ -534,6 +529,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeManifest}, }), } @@ -553,6 +549,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ 
"Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeManifest}, }), }, }) @@ -566,6 +563,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeManifest}, }), }, }) @@ -598,12 +596,17 @@ func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { return nil } -func TestManifestFetch(t *testing.T) { +func TestV1ManifestFetch(t *testing.T) { ctx := context.Background() repo := "test.example.com/repo" m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addTestManifest(repo, dgst.String(), m1.Raw, &m) + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifest(repo, dgst.String(), pl, &m) + addTestManifest(repo, "latest", pl, &m) e, c := testServer(m) defer c() @@ -617,7 +620,7 @@ func TestManifestFetch(t *testing.T) { t.Fatal(err) } - ok, err := ms.Exists(dgst) + ok, err := ms.Exists(ctx, dgst) if err != nil { t.Fatal(err) } @@ -625,11 +628,29 @@ func TestManifestFetch(t *testing.T) { t.Fatal("Manifest does not exist") } - manifest, err := ms.Get(dgst) + manifest, err := ms.Get(ctx, dgst) if err != nil { t.Fatal(err) } - if err := checkEqualManifest(manifest, m1); err != nil { + v1manifest, ok := manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err := checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + manifest, err = ms.Get(ctx, dgst, WithTag("latest")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } } @@ -643,17 +664,22 @@ func TestManifestFetchWithEtag(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } - ctx := context.Background() + ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } - _, err = ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + clientManifestService, ok := ms.(*manifests) + if !ok { + panic("wrong type for client manifest service") + } + _, err = clientManifestService.Get(ctx, d1, WithTag("latest"), AddEtagToTag("latest", d1.String())) if err != distribution.ErrManifestNotModified { t.Fatal(err) } @@ -690,10 +716,10 @@ func TestManifestDelete(t *testing.T) { t.Fatal(err) } - if err := ms.Delete(dgst1); err != nil { + if err := ms.Delete(ctx, dgst1); err != nil { t.Fatal(err) } - if err := ms.Delete(dgst2); err == nil { + if err := ms.Delete(ctx, dgst2); err == nil { t.Fatal("Expected error deleting unknown manifest") } // TODO(dmcgowan): Check for specific unknown error @@ -702,12 +728,17 @@ func TestManifestDelete(t *testing.T) { func TestManifestPut(t *testing.T) { repo := "test.example.com/repo/delete" m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) + + _, payload, err := m1.Payload() + if err != nil { + t.Fatal(err) + } var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", Route: "/v2/" + repo + "/manifests/other", - Body: m1.Raw, + 
Body: payload, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -731,7 +762,7 @@ func TestManifestPut(t *testing.T) { t.Fatal(err) } - if err := ms.Put(m1); err != nil { + if _, err := ms.Put(ctx, m1, WithTag(m1.Tag)); err != nil { t.Fatal(err) } @@ -751,21 +782,22 @@ func TestManifestTags(t *testing.T) { } `)) var m testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo + "/tags/list", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: tagsList, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(tagsList))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - + for i := 0; i < 3; i++ { + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + } e, c := testServer(m) defer c() @@ -773,22 +805,29 @@ func TestManifestTags(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() - ms, err := r.Manifests(ctx) + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) if err != nil { t.Fatal(err) } - - tags, err := ms.Tags() - if err != nil { - t.Fatal(err) - } - if len(tags) != 3 { t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) } - // TODO(dmcgowan): Check array + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } // TODO(dmcgowan): Check for error cases } @@ -821,7 +860,7 @@ func TestManifestUnauthorized(t *testing.T) { t.Fatal(err) } - _, err = ms.Get(dgst) + _, err = ms.Get(ctx, dgst) if err == nil { t.Fatal("Expected error fetching manifest") } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 7b7c3c0d..2672b77b 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -871,19 +871,15 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("unexpected error signing manifest: %v", err) } - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") - - dgst := digest.FromBytes(payload) - + dgst := digest.FromBytes(signedManifest.Canonical) args.signedManifest = signedManifest args.dgst = dgst manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) checkErr(t, err, "building manifest url") - resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "putting signed manifest no error", manifestURL, signedManifest) + checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -914,11 +910,12 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m var fetchedManifest schema1.SignedManifest dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } - if 
!bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { + if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { t.Fatalf("manifests do not match") } @@ -940,10 +937,55 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("error decoding fetched manifest: %v", err) } - if !bytes.Equal(fetchedManifestByDigest.Raw, signedManifest.Raw) { + if !bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { t.Fatalf("manifests do not match") } + // check signature was roundtripped + signatures, err := fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) + } + + // Re-sign, push and pull the same digest + sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) + if err != nil { + t.Fatal(err) + + } + + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "re-fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // check two signatures were roundtripped + signatures, err = fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 2 { + t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) + } + // Get by name with etag, gives 304 etag := resp.Header.Get("Etag") req, err := http.NewRequest("GET", manifestURL, nil) @@ -956,7 +998,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("Error constructing request: %s", err) } - checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) // Get by digest with etag, gives 304 req, err = http.NewRequest("GET", manifestDigestURL, nil) @@ -969,7 +1011,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("Error constructing request: %s", err) } - checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) // Ensure that the tag is listed. 
resp, err = http.Get(tagsURL) @@ -1143,8 +1185,13 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { var body []byte + if sm, ok := v.(*schema1.SignedManifest); ok { - body = sm.Raw + _, pl, err := sm.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl } else { var err error body, err = json.MarshalIndent(v, "", " ") @@ -1435,7 +1482,7 @@ func checkErr(t *testing.T, err error, msg string) { } } -func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, @@ -1459,7 +1506,6 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) for i := range unsignedManifest.FSLayers { rs, dgstStr, err := testutil.CreateRandomTarFile() - if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } @@ -1477,20 +1523,22 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) t.Fatalf("unexpected error signing manifest: %v", err) } - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") + dgst := digest.FromBytes(signedManifest.Canonical) - dgst := digest.FromBytes(payload) - - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + // Create this repository by tag to ensure the tag mapping is made in the registry + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, tag) checkErr(t, err, "building manifest url") + location, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building location URL") + resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, + "Location": []string{location}, "Docker-Content-Digest": []string{dgst.String()}, }) + return dgst } // Test mutation operations on a registry configured as a cache. 
Ensure that they return @@ -1577,3 +1625,64 @@ func TestCheckContextNotifier(t *testing.T) { t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) } } + +func TestProxyManifestGetByTag(t *testing.T) { + truthConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + truthConfig.HTTP.Headers = headerConfig + + imageName := "foo/bar" + tag := "latest" + + truthEnv := newTestEnvWithConfig(t, &truthConfig) + // create a repository in the truth registry + dgst := createRepository(truthEnv, t, imageName, tag) + + proxyConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + Proxy: configuration.Proxy{ + RemoteURL: truthEnv.server.URL, + }, + } + proxyConfig.HTTP.Headers = headerConfig + + proxyEnv := newTestEnvWithConfig(t, &proxyConfig) + + manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp, err := http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest from proxy by digest") + defer resp.Body.Close() + + manifestTagURL, err := proxyEnv.builder.BuildManifestURL(imageName, tag) + checkErr(t, err, "building manifest url") + + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // Create another manifest in the remote with the same image/tag pair + newDigest := createRepository(truthEnv, t, imageName, tag) + if dgst == newDigest { + t.Fatalf("non-random test data") + } + + // fetch it with the same proxy URL as before. Ensure the updated content is at the same tag + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{newDigest.String()}, + }) +} diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 2ec51b99..be14b00a 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -2,19 +2,15 @@ package handlers import ( "bytes" - "encoding/json" "fmt" "net/http" - "strings" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" - "golang.org/x/net/context" ) // imageManifestDispatcher takes the request context and builds the @@ -33,7 +29,8 @@ func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { } mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), } if !ctx.readOnly { @@ -54,6 +51,8 @@ type imageManifestHandler struct { } // GetImageManifest fetches the image manifest from the storage backend, if it exists. 
+// todo(richardscothern): this assumes v2 schema 1 manifests for now but in the future +// get the version from the Accept HTTP header func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") manifests, err := imh.Repository.Manifests(imh) @@ -62,42 +61,38 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } - var sm *schema1.SignedManifest + var manifest distribution.Manifest if imh.Tag != "" { - sm, err = manifests.GetByTag(imh.Tag) - } else { - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) + tags := imh.Repository.Tags(imh) + desc, err := tags.Get(imh, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } - sm, err = manifests.Get(imh.Digest) + imh.Digest = desc.Digest } + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + manifest, err = manifests.Get(imh, imh.Digest) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } - // Get the digest, if we don't already have it. - if imh.Digest == "" { - dgst, err := digestManifest(imh, sm) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - return - } - if etagMatch(r, dgst.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - - imh.Digest = dgst + ct, p, err := manifest.Payload() + if err != nil { + return } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw))) + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) - w.Write(sm.Raw) + w.Write(p) } func etagMatch(r *http.Request, etag string) bool { @@ -109,7 +104,7 @@ func etagMatch(r *http.Request, etag string) bool { return false } -// PutImageManifest validates and stores and image in the registry. +// PutImageManifest validates and stores an image in the registry. 
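// Editor's note: with this patch the PUT handler below dispatches on the
// request's Content-Type rather than assuming schema1. An illustrative
// restatement of the core step:
//
//	mediaType := r.Header.Get("Content-Type")
//	manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
//	// desc.Digest is then checked against the digest in the request URL
//	// (or recorded for the tag) before the manifest is stored.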
 func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
 	ctxu.GetLogger(imh).Debug("PutImageManifest")
 	manifests, err := imh.Repository.Manifests(imh)
@@ -124,39 +119,28 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 		return
 	}
 
-	var manifest schema1.SignedManifest
-	if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil {
+	mediaType := r.Header.Get("Content-Type")
+	manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
+	if err != nil {
 		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
 		return
 	}
 
-	dgst, err := digestManifest(imh, &manifest)
-	if err != nil {
-		imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err))
-		return
-	}
-
-	// Validate manifest tag or digest matches payload
-	if imh.Tag != "" {
-		if manifest.Tag != imh.Tag {
-			ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag)
-			imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid)
-			return
-		}
-
-		imh.Digest = dgst
-	} else if imh.Digest != "" {
-		if dgst != imh.Digest {
-			ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest)
+	if imh.Digest != "" {
+		if desc.Digest != imh.Digest {
+			ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest)
 			imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid)
 			return
 		}
+	} else if imh.Tag != "" {
+		imh.Digest = desc.Digest
 	} else {
 		imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
 		return
 	}
 
-	if err := manifests.Put(&manifest); err != nil {
+	_, err = manifests.Put(imh, manifest)
+	if err != nil {
 		// TODO(stevvooe): These error handling switches really need to be
 		// handled by an app global mapper.
 		if err == distribution.ErrUnsupported {
@@ -188,6 +172,17 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 		return
 	}
 
+	// Tag this manifest
+	if imh.Tag != "" {
+		tags := imh.Repository.Tags(imh)
+		err = tags.Tag(imh, imh.Tag, desc)
+		if err != nil {
+			imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
+			return
+		}
+
+	}
+
 	// Construct a canonical url for the uploaded manifest.
 	location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String())
 	if err != nil {
@@ -212,7 +207,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h
 		return
 	}
 
-	err = manifests.Delete(imh.Digest)
+	err = manifests.Delete(imh, imh.Digest)
 	if err != nil {
 		switch err {
 		case digest.ErrDigestUnsupported:
@@ -233,22 +228,3 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h
 
 	w.WriteHeader(http.StatusAccepted)
 }
-
-// digestManifest takes a digest of the given manifest. This belongs somewhere
-// better but we'll wait for a refactoring cycle to find that real somewhere.
-func digestManifest(ctx context.Context, sm *schema1.SignedManifest) (digest.Digest, error) {
-	p, err := sm.Payload()
-	if err != nil {
-		if !strings.Contains(err.Error(), "missing signature key") {
-			ctxu.GetLogger(ctx).Errorf("error getting manifest payload: %v", err)
-			return "", err
-		}
-
-		// NOTE(stevvooe): There are no signatures but we still have a
-		// payload. The request will fail later but this is not the
-		// responsibility of this part of the code.
- p = sm.Raw - } - - return digest.FromBytes(p), nil -} diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 54725585..d9f0106c 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -34,13 +34,9 @@ type tagsAPIResponse struct { // GetTags returns a json list of tags for a specific image name. func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - manifests, err := th.Repository.Manifests(th) - if err != nil { - th.Errors = append(th.Errors, err) - return - } - tags, err := manifests.Tags() + tagService := th.Repository.Tags(th) + tags, err := tagService.All(th) if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 1e9e24de..13cb5f6b 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -6,8 +6,6 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -24,8 +22,8 @@ type proxyManifestStore struct { var _ distribution.ManifestService = &proxyManifestStore{} -func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) { - exists, err := pms.localManifests.Exists(dgst) +func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(ctx, dgst) if err != nil { return false, err } @@ -33,117 +31,56 @@ func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) { return true, nil } - return pms.remoteManifests.Exists(dgst) + return pms.remoteManifests.Exists(ctx, dgst) } -func (pms proxyManifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { - sm, err := pms.localManifests.Get(dgst) - if err == nil { - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) - return sm, err +func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + // At this point `dgst` was either specified explicitly, or returned by the + // tagstore with the most recent association. + var fromRemote bool + manifest, err := pms.localManifests.Get(ctx, dgst, options...) + if err != nil { + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
+ if err != nil { + return nil, err + } + fromRemote = true } - sm, err = pms.remoteManifests.Get(dgst) + _, payload, err := manifest.Payload() if err != nil { return nil, err } - proxyMetrics.ManifestPull(uint64(len(sm.Raw))) - err = pms.localManifests.Put(sm) - if err != nil { - return nil, err + proxyMetrics.ManifestPush(uint64(len(payload))) + if fromRemote { + proxyMetrics.ManifestPull(uint64(len(payload))) + + _, err = pms.localManifests.Put(ctx, manifest) + if err != nil { + return nil, err + } + + // Schedule the repo for removal + pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + + // Ensure the manifest blob is cleaned up + pms.scheduler.AddBlob(dgst.String(), repositoryTTL) } - // Schedule the repo for removal - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) - - // Ensure the manifest blob is cleaned up - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) - - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) - - return sm, err + return manifest, err } -func (pms proxyManifestStore) Tags() ([]string, error) { - return pms.localManifests.Tags() +func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var d digest.Digest + return d, distribution.ErrUnsupported } -func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) { - exists, err := pms.localManifests.ExistsByTag(tag) - if err != nil { - return false, err - } - if exists { - return true, nil - } - - return pms.remoteManifests.ExistsByTag(tag) -} - -func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - var localDigest digest.Digest - - localManifest, err := pms.localManifests.GetByTag(tag, options...) 
- switch err.(type) { - case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision: - goto fromremote - case nil: - break - default: - return nil, err - } - - localDigest, err = manifestDigest(localManifest) - if err != nil { - return nil, err - } - -fromremote: - var sm *schema1.SignedManifest - sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) - if err != nil && err != distribution.ErrManifestNotModified { - return nil, err - } - - if err == distribution.ErrManifestNotModified { - context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String()) - return localManifest, nil - } - context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String()) - - err = pms.localManifests.Put(sm) - if err != nil { - return nil, err - } - - dgst, err := manifestDigest(sm) - if err != nil { - return nil, err - } - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) - - proxyMetrics.ManifestPull(uint64(len(sm.Raw))) - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) - - return sm, err -} - -func manifestDigest(sm *schema1.SignedManifest) (digest.Digest, error) { - payload, err := sm.Payload() - if err != nil { - return "", err - - } - - return digest.FromBytes(payload), nil -} - -func (pms proxyManifestStore) Put(manifest *schema1.SignedManifest) error { +func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } -func (pms proxyManifestStore) Delete(dgst digest.Digest) error { - return distribution.ErrUnsupported +/*func (pms proxyManifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + return 0, distribution.ErrUnsupported } +*/ diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index a5a0a21b..aeecae10 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -37,40 +37,31 @@ func (te manifestStoreTestEnv) RemoteStats() *map[string]int { return &rs } -func (sm statsManifest) Delete(dgst digest.Digest) error { +func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { sm.stats["delete"]++ - return sm.manifests.Delete(dgst) + return sm.manifests.Delete(ctx, dgst) } -func (sm statsManifest) Exists(dgst digest.Digest) (bool, error) { +func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { sm.stats["exists"]++ - return sm.manifests.Exists(dgst) + return sm.manifests.Exists(ctx, dgst) } -func (sm statsManifest) ExistsByTag(tag string) (bool, error) { - sm.stats["existbytag"]++ - return sm.manifests.ExistsByTag(tag) -} - -func (sm statsManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { sm.stats["get"]++ - return sm.manifests.Get(dgst) + return sm.manifests.Get(ctx, dgst) } -func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - sm.stats["getbytag"]++ - return sm.manifests.GetByTag(tag, options...) 
-} - -func (sm statsManifest) Put(manifest *schema1.SignedManifest) error { +func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { sm.stats["put"]++ - return sm.manifests.Put(manifest) + return sm.manifests.Put(ctx, manifest) } -func (sm statsManifest) Tags() ([]string, error) { - sm.stats["tags"]++ - return sm.manifests.Tags() +/*func (sm statsManifest) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + sm.stats["enumerate"]++ + return sm.manifests.Enumerate(ctx, manifests, last) } +*/ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() @@ -169,15 +160,12 @@ func populateRepo(t *testing.T, ctx context.Context, repository distribution.Rep if err != nil { t.Fatalf(err.Error()) } - ms.Put(sm) + dgst, err := ms.Put(ctx, sm) if err != nil { t.Fatalf("unexpected errors putting manifest: %v", err) } - pl, err := sm.Payload() - if err != nil { - t.Fatal(err) - } - return digest.FromBytes(pl), nil + + return dgst, nil } // TestProxyManifests contains basic acceptance tests @@ -189,8 +177,9 @@ func TestProxyManifests(t *testing.T) { localStats := env.LocalStats() remoteStats := env.RemoteStats() + ctx := context.Background() // Stat - must check local and remote - exists, err := env.manifests.ExistsByTag("latest") + exists, err := env.manifests.Exists(ctx, env.manifestDigest) if err != nil { t.Fatalf("Error checking existance") } @@ -198,15 +187,16 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Unexpected non-existant manifest") } - if (*localStats)["existbytag"] != 1 && (*remoteStats)["existbytag"] != 1 { - t.Errorf("Unexpected exists count") + if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { + t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) } // Get - should succeed and pull manifest into local - _, err = env.manifests.Get(env.manifestDigest) + _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } + if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { t.Errorf("Unexpected get count") } @@ -216,7 +206,7 @@ func TestProxyManifests(t *testing.T) { } // Stat - should only go to local - exists, err = env.manifests.ExistsByTag("latest") + exists, err = env.manifests.Exists(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } @@ -224,19 +214,21 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Unexpected non-existant manifest") } - if (*localStats)["existbytag"] != 2 && (*remoteStats)["existbytag"] != 1 { + if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { t.Errorf("Unexpected exists count") - } // Get - should get from remote, to test freshness - _, err = env.manifests.Get(env.manifestDigest) + _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } - if (*remoteStats)["get"] != 2 && (*remoteStats)["existsbytag"] != 1 && (*localStats)["put"] != 1 { + if (*remoteStats)["get"] != 2 && (*remoteStats)["exists"] != 1 && (*localStats)["put"] != 1 { t.Errorf("Unexpected get count") } +} + +func TestProxyTagService(t *testing.T) { } diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index 8a5f5ef6..8e1be5f2 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -42,6 +42,7 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name s.OnManifestExpire(func(repoName string) error { return 
v.RemoveRepository(repoName) }) + err = s.Start() if err != nil { return nil, err @@ -78,7 +79,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distri if err != nil { return nil, err } - localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification) + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) if err != nil { return nil, err } @@ -106,8 +107,11 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distri ctx: ctx, scheduler: pr.scheduler, }, - name: name, - signatures: localRepo.Signatures(), + name: name, + tags: proxyTagService{ + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + }, }, nil } @@ -115,14 +119,13 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distri // locally, or pulling it through from a remote and caching it locally if it doesn't // already exist type proxiedRepository struct { - blobStore distribution.BlobStore - manifests distribution.ManifestService - name string - signatures distribution.SignatureService + blobStore distribution.BlobStore + manifests distribution.ManifestService + name string + tags distribution.TagService } func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // options return pr.manifests, nil } @@ -134,6 +137,6 @@ func (pr *proxiedRepository) Name() string { return pr.name } -func (pr *proxiedRepository) Signatures() distribution.SignatureService { - return pr.signatures +func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { + return pr.tags } diff --git a/docs/proxy/proxytagservice.go b/docs/proxy/proxytagservice.go new file mode 100644 index 00000000..c52460c4 --- /dev/null +++ b/docs/proxy/proxytagservice.go @@ -0,0 +1,58 @@ +package proxy + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +// proxyTagService supports local and remote lookup of tags. +type proxyTagService struct { + localTags distribution.TagService + remoteTags distribution.TagService +} + +var _ distribution.TagService = proxyTagService{} + +// Get attempts to get the most recent digest for the tag by checking the remote +// tag service first and then caching it locally. 
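// For example (editor's sketch, not from this patch):
//
//	desc, err := pt.Get(ctx, "latest") // remote lookup first
//	// on success the tag -> desc association is also written to localTags
//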
If the remote is unavailable +// the local association is returned +func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + desc, err = pt.localTags.Get(ctx, tag) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil +} + +func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} + +func (pt proxyTagService) Untag(ctx context.Context, tag string) error { + err := pt.localTags.Untag(ctx, tag) + if err != nil { + return err + } + return nil +} + +func (pt proxyTagService) All(ctx context.Context) ([]string, error) { + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } + return pt.localTags.All(ctx) +} + +func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/docs/proxy/proxytagservice_test.go b/docs/proxy/proxytagservice_test.go new file mode 100644 index 00000000..8d9518c0 --- /dev/null +++ b/docs/proxy/proxytagservice_test.go @@ -0,0 +1,164 @@ +package proxy + +import ( + "sort" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type mockTagStore struct { + mapping map[string]distribution.Descriptor + sync.Mutex +} + +var _ distribution.TagService = &mockTagStore{} + +func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + m.Lock() + defer m.Unlock() + + if d, ok := m.mapping[tag]; ok { + return d, nil + } + return distribution.Descriptor{}, distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + m.Lock() + defer m.Unlock() + + m.mapping[tag] = desc + return nil +} + +func (m *mockTagStore) Untag(ctx context.Context, tag string) error { + m.Lock() + defer m.Unlock() + + if _, ok := m.mapping[tag]; ok { + delete(m.mapping, tag) + return nil + } + return distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) All(ctx context.Context) ([]string, error) { + m.Lock() + defer m.Unlock() + + var tags []string + for tag := range m.mapping { + tags = append(tags, tag) + } + + return tags, nil +} + +func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { + if local == nil { + local = make(map[string]distribution.Descriptor) + } + if remote == nil { + remote = make(map[string]distribution.Descriptor) + } + return &proxyTagService{ + localTags: &mockTagStore{mapping: local}, + remoteTags: &mockTagStore{mapping: remote}, + } +} + +func TestGet(t *testing.T) { + remoteDesc := distribution.Descriptor{Size: 42} + remoteTag := "remote" + proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) + + ctx := context.Background() + + // Get pre-loaded tag + d, err := proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if d != remoteDesc { + t.Fatal("unable to get put tag") + } + + local, err := proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + if local != remoteDesc { + 
t.Fatalf("unexpected descriptor pulled through") + } + + // Manually overwrite remote tag + newRemoteDesc := distribution.Descriptor{Size: 43} + err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc) + if err != nil { + t.Fatal(err) + } + + d, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if d != newRemoteDesc { + t.Fatal("unable to get put tag") + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + // untag, ensure it's removed locally, but present in remote + err = proxyTags.Untag(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err == nil { + t.Fatalf("Expected error getting Untag'd tag") + } + + _, err = proxyTags.remoteTags.Get(ctx, remoteTag) + if err != nil { + t.Fatalf("remote tag should not be untagged with proxyTag.Untag") + } + + _, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("untagged tag should be pulled through") + } + + // Add another tag. Ensure both tags appear in enumerate + err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42}) + if err != nil { + t.Fatal(err) + } + + all, err := proxyTags.All(ctx) + if err != nil { + t.Fatal(err) + } + + if len(all) != 2 { + t.Fatalf("Unexpected tag length returned from All() : %d ", len(all)) + } + + sort.Strings(all) + if all[0] != "funtag" && all[1] != "remote" { + t.Fatalf("Unexpected tags returned from All() : %v ", all) + } +} diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 024c8e4b..73061592 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -1,6 +1,7 @@ package storage import ( + "encoding/json" "fmt" "github.com/docker/distribution" @@ -11,20 +12,21 @@ import ( "github.com/docker/libtrust" ) +// manifestStore is a storage driver based store for storing schema1 manifests. type manifestStore struct { repository *repository - revisionStore *revisionStore - tagStore *tagStore + blobStore *linkedBlobStore ctx context.Context + signatures *signatureStore skipDependencyVerification bool } var _ distribution.ManifestService = &manifestStore{} -func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") - _, err := ms.revisionStore.blobStore.Stat(ms.ctx, dgst) + _, err := ms.blobStore.Stat(ms.ctx, dgst) if err != nil { if err == distribution.ErrBlobUnknown { return false, nil @@ -36,76 +38,131 @@ func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { return true, nil } -func (ms *manifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(ms.ctx, dgst) + // Ensure that this revision is available in this repository. + _, err := ms.blobStore.Stat(ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Name(), + Revision: dgst, + } + } + + return nil, err + } + + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. 
+
+	content, err := ms.blobStore.Get(ctx, dgst)
+	if err != nil {
+		if err == distribution.ErrBlobUnknown {
+			return nil, distribution.ErrManifestUnknownRevision{
+				Name:     ms.repository.Name(),
+				Revision: dgst,
+			}
+		}
+
+		return nil, err
+	}
+
+	// Fetch the signatures for the manifest
+	signatures, err := ms.signatures.Get(dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	jsig, err := libtrust.NewJSONSignature(content, signatures...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Extract the pretty JWS
+	raw, err := jsig.PrettySignature("signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	var sm schema1.SignedManifest
+	if err := json.Unmarshal(raw, &sm); err != nil {
+		return nil, err
+	}
+
+	return &sm, nil
 }
 
-// SkipLayerVerification allows a manifest to be Put before it's
+// SkipLayerVerification allows a manifest to be Put before its
 // layers are on the filesystem
-func SkipLayerVerification(ms distribution.ManifestService) error {
-	if ms, ok := ms.(*manifestStore); ok {
+func SkipLayerVerification() distribution.ManifestServiceOption {
+	return skipLayerOption{}
+}
+
+type skipLayerOption struct{}
+
+func (o skipLayerOption) Apply(m distribution.ManifestService) error {
+	if ms, ok := m.(*manifestStore); ok {
 		ms.skipDependencyVerification = true
 		return nil
 	}
 	return fmt.Errorf("skip layer verification only valid for manifestStore")
 }
 
-func (ms *manifestStore) Put(manifest *schema1.SignedManifest) error {
+func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
 	context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
 
-	if err := ms.verifyManifest(ms.ctx, manifest); err != nil {
-		return err
+	sm, ok := manifest.(*schema1.SignedManifest)
+	if !ok {
+		return "", fmt.Errorf("non-v1 manifest put to signed manifestStore: %T", manifest)
 	}
 
-	// Store the revision of the manifest
-	revision, err := ms.revisionStore.put(ms.ctx, manifest)
+	if err := ms.verifyManifest(ms.ctx, *sm); err != nil {
+		return "", err
+	}
+
+	mt := schema1.MediaTypeManifest
+	payload := sm.Canonical
+
+	revision, err := ms.blobStore.Put(ctx, mt, payload)
 	if err != nil {
-		return err
+		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
+		return "", err
 	}
 
-	// Now, tag the manifest
-	return ms.tagStore.tag(manifest.Tag, revision.Digest)
+	// Link the revision into the repository.
+	if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
+		return "", err
+	}
+
+	// Grab each json signature and store them.
+	signatures, err := sm.Signatures()
+	if err != nil {
+		return "", err
+	}
+
+	if err := ms.signatures.Put(revision.Digest, signatures...); err != nil {
+		return "", err
+	}
+
+	return revision.Digest, nil
 }
 
 // Delete removes the revision of the specified manifest.
-func (ms *manifestStore) Delete(dgst digest.Digest) error { +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.revisionStore.delete(ms.ctx, dgst) + return ms.blobStore.Delete(ctx, dgst) } -func (ms *manifestStore) Tags() ([]string, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Tags") - return ms.tagStore.tags() -} - -func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).ExistsByTag") - return ms.tagStore.exists(tag) -} - -func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - for _, option := range options { - err := option(ms) - if err != nil { - return nil, err - } - } - - context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag") - dgst, err := ms.tagStore.resolve(tag) - if err != nil { - return nil, err - } - - return ms.revisionStore.get(ms.ctx, dgst) +func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + return 0, distribution.ErrUnsupported } // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. -func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { +// content, leaving trust policies of that content up to consumems. +func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest) error { var errs distribution.ErrManifestVerification if len(mnfst.Name) > reference.NameTotalLengthMax { @@ -129,7 +186,7 @@ func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.Sign len(mnfst.History), len(mnfst.FSLayers))) } - if _, err := schema1.Verify(mnfst); err != nil { + if _, err := schema1.Verify(&mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: errs = append(errs, distribution.ErrManifestUnverified{}) @@ -143,15 +200,15 @@ func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.Sign } if !ms.skipDependencyVerification { - for _, fsLayer := range mnfst.FSLayers { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) if err != nil { if err != distribution.ErrBlobUnknown { errs = append(errs, err) } - // On error here, we always append unknown blob errors. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) + // On error here, we always append unknown blob erroms. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) } } } diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index de31b364..a41feb04 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -30,7 +30,8 @@ type manifestStoreTestEnv struct { func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider( + memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -58,24 +59,6 @@ func TestManifestStorage(t *testing.T) { t.Fatal(err) } - exists, err := ms.ExistsByTag(env.tag) - if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) - } - - if exists { - t.Fatalf("manifest should not exist") - } - - if _, err := ms.GetByTag(env.tag); true { - switch err.(type) { - case distribution.ErrManifestUnknown: - break - default: - t.Fatalf("expected manifest unknown error: %#v", err) - } - } - m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, @@ -114,7 +97,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("error signing manifest: %v", err) } - err = ms.Put(sm) + _, err = ms.Put(ctx, sm) if err == nil { t.Fatalf("expected errors putting manifest with full verification") } @@ -150,30 +133,40 @@ func TestManifestStorage(t *testing.T) { } } - if err = ms.Put(sm); err != nil { + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, sm); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - exists, err = ms.ExistsByTag(env.tag) + exists, err := ms.Exists(ctx, manifestDigest) if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) + t.Fatalf("unexpected error checking manifest existence: %#v", err) } if !exists { t.Fatalf("manifest should exist") } - fetchedManifest, err := ms.GetByTag(env.tag) - + fromStore, err := ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } + fetchedManifest, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + if !reflect.DeepEqual(fetchedManifest, sm) { t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) } - fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures") + _, pl, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } @@ -185,8 +178,9 @@ func TestManifestStorage(t *testing.T) { // Now that we have a payload, take a moment to check that the manifest is // return by the payload digest. 
+ dgst := digest.FromBytes(payload) - exists, err = ms.Exists(dgst) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("error checking manifest existence by digest: %v", err) } @@ -195,7 +189,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest %s should exist", dgst) } - fetchedByDigest, err := ms.Get(dgst) + fetchedByDigest, err := ms.Get(ctx, dgst) if err != nil { t.Fatalf("unexpected error fetching manifest by digest: %v", err) } @@ -213,20 +207,6 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) } - // Grabs the tags and check that this tagged manifest is present - tags, err := ms.Tags() - if err != nil { - t.Fatalf("unexpected error fetching tags: %v", err) - } - - if len(tags) != 1 { - t.Fatalf("unexpected tags returned: %v", tags) - } - - if tags[0] != env.tag { - t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{env.tag}) - } - // Now, push the same manifest with a different key pk2, err := libtrust.GenerateECP256PrivateKey() if err != nil { @@ -237,8 +217,12 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } + _, pl, err = sm2.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } - jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures") + jws2, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -252,15 +236,20 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) } - if err = ms.Put(sm2); err != nil { + if manifestDigest, err = ms.Put(ctx, sm2); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - fetched, err := ms.GetByTag(env.tag) + fromStore, err = ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } + fetched, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected type from signed manifeststore : %T", fetched) + } + if _, err := schema1.Verify(fetched); err != nil { t.Fatalf("unexpected error verifying manifest: %v", err) } @@ -276,7 +265,12 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error getting expected signatures: %v", err) } - receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures") + _, pl, err = fetched.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } @@ -302,12 +296,12 @@ func TestManifestStorage(t *testing.T) { } // Test deleting manifests - err = ms.Delete(dgst) + err = ms.Delete(ctx, dgst) if err != nil { t.Fatalf("unexpected an error deleting manifest by digest: %v", err) } - exists, err = ms.Exists(dgst) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } @@ -315,7 +309,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest should not exist") } - deletedManifest, err := ms.Get(dgst) + deletedManifest, err := ms.Get(ctx, dgst) if err == nil { t.Errorf("Unexpected success getting deleted manifest") } @@ -331,12 +325,12 @@ func TestManifestStorage(t *testing.T) { } // Re-upload should restore manifest to a good state - err = ms.Put(sm) + _, err = ms.Put(ctx, sm) if err != nil { t.Errorf("Error re-uploading deleted manifest") } - exists, err = ms.Exists(dgst) + exists, err = 
ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } @@ -344,7 +338,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Restored manifest should exist") } - deletedManifest, err = ms.Get(dgst) + deletedManifest, err = ms.Get(ctx, dgst) if err != nil { t.Errorf("Unexpected error getting manifest") } @@ -364,7 +358,7 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatal(err) } - err = ms.Delete(dgst) + err = ms.Delete(ctx, dgst) if err == nil { t.Errorf("Unexpected success deleting while disabled") } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 5ef06d53..c58b91d8 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -145,6 +145,15 @@ func (repo *repository) Name() string { return repo.name } +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. @@ -159,36 +168,31 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M ms := &manifestStore{ ctx: ctx, repository: repo, - revisionStore: &revisionStore{ - ctx: ctx, - repository: repo, - blobStore: &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - }, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. - linkPathFns: manifestLinkPathFns, - resumableDigestEnabled: repo.resumableDigestEnabled, + blobStore: &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, }, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, }, - tagStore: &tagStore{ + signatures: &signatureStore{ ctx: ctx, repository: repo, - blobStore: repo.registry.blobStore, + blobStore: repo.blobStore, }, } // Apply options for _, option := range options { - err := option(ms) + err := option.Apply(ms) if err != nil { return nil, err } @@ -225,11 +229,3 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { resumableDigestEnabled: repo.resumableDigestEnabled, } } - -func (repo *repository) Signatures() distribution.SignatureService { - return &signatureStore{ - repository: repo, - blobStore: repo.blobStore, - ctx: repo.ctx, - } -} diff --git a/docs/storage/revisionstore.go b/docs/storage/revisionstore.go deleted file mode 100644 index ed2d5dd3..00000000 --- a/docs/storage/revisionstore.go +++ /dev/null @@ -1,111 +0,0 @@ -package storage - -import ( - "encoding/json" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/libtrust" -) - -// revisionStore supports storing and managing manifest revisions. 
-type revisionStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -// get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*schema1.SignedManifest, error) { - // Ensure that this revision is available in this repository. - _, err := rs.blobStore.Stat(ctx, revision) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: rs.repository.Name(), - Revision: revision, - } - } - - return nil, err - } - - // TODO(stevvooe): Need to check descriptor from above to ensure that the - // mediatype is as we expect for the manifest store. - - content, err := rs.blobStore.Get(ctx, revision) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: rs.repository.Name(), - Revision: revision, - } - } - - return nil, err - } - - // Fetch the signatures for the manifest - signatures, err := rs.repository.Signatures().Get(revision) - if err != nil { - return nil, err - } - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - - return &sm, nil -} - -// put stores the manifest in the repository, if not already present. Any -// updated signatures will be stored, as well. -func (rs *revisionStore) put(ctx context.Context, sm *schema1.SignedManifest) (distribution.Descriptor, error) { - // Resolve the payload in the manifest. - payload, err := sm.Payload() - if err != nil { - return distribution.Descriptor{}, err - } - - // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.Put(ctx, schema1.ManifestMediaType, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return distribution.Descriptor{}, err - } - - // Link the revision into the repository. - if err := rs.blobStore.linkBlob(ctx, revision); err != nil { - return distribution.Descriptor{}, err - } - - // Grab each json signature and store them. 
- signatures, err := sm.Signatures() - if err != nil { - return distribution.Descriptor{}, err - } - - if err := rs.repository.Signatures().Put(revision.Digest, signatures...); err != nil { - return distribution.Descriptor{}, err - } - - return revision, nil -} - -func (rs *revisionStore) delete(ctx context.Context, revision digest.Digest) error { - return rs.blobStore.Delete(ctx, revision) -} diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index f5888f64..ede4e0e2 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -4,7 +4,6 @@ import ( "path" "sync" - "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) @@ -15,16 +14,6 @@ type signatureStore struct { ctx context.Context } -func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobStore) *signatureStore { - return &signatureStore{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - } -} - -var _ distribution.SignatureService = &signatureStore{} - func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { signaturesPath, err := pathFor(manifestSignaturesPathSpec{ name: s.repository.Name(), diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index aec95286..167c7fa0 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -9,37 +9,41 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) +var _ distribution.TagService = &tagStore{} + // tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only makes use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects type tagStore struct { repository *repository blobStore *blobStore - ctx context.Context } -// tags lists the manifest tags for the specified repository. -func (ts *tagStore) tags() ([]string, error) { - p, err := pathFor(manifestTagPathSpec{ +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + + pathSpec, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name(), }) - if err != nil { - return nil, err + return tags, err } - var tags []string - entries, err := ts.blobStore.driver.List(ts.ctx, p) + entries, err := ts.blobStore.driver.List(ctx, pathSpec) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} default: - return nil, err + return tags, err } } for _, entry := range entries { _, filename := path.Split(entry) - tags = append(tags, filename) } @@ -47,7 +51,7 @@ func (ts *tagStore) tags() ([]string, error) { } // exists returns true if the specified manifest tag exists in the repository. 
-func (ts *tagStore) exists(tag string) (bool, error) { +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { tagPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name(), tag: tag, @@ -57,7 +61,7 @@ func (ts *tagStore) exists(tag string) (bool, error) { return false, err } - exists, err := exists(ts.ctx, ts.blobStore.driver, tagPath) + exists, err := exists(ctx, ts.blobStore.driver, tagPath) if err != nil { return false, err } @@ -65,9 +69,9 @@ func (ts *tagStore) exists(tag string) (bool, error) { return exists, nil } -// tag tags the digest with the given tag, updating the the store to point at +// Tag tags the digest with the given tag, updating the the store to point at // the current tag. The digest must point to a manifest. -func (ts *tagStore) tag(tag string, revision digest.Digest) error { +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { currentPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name(), tag: tag, @@ -77,43 +81,44 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error { return err } - nbs := ts.linkedBlobStore(ts.ctx, tag) + lbs := ts.linkedBlobStore(ctx, tag) + // Link into the index - if err := nbs.linkBlob(ts.ctx, distribution.Descriptor{Digest: revision}); err != nil { + if err := lbs.linkBlob(ctx, desc); err != nil { return err } // Overwrite the current link - return ts.blobStore.link(ts.ctx, currentPath, revision) + return ts.blobStore.link(ctx, currentPath, desc.Digest) } // resolve the current revision for name and tag. -func (ts *tagStore) resolve(tag string) (digest.Digest, error) { +func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { currentPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name(), tag: tag, }) if err != nil { - return "", err + return distribution.Descriptor{}, err } - revision, err := ts.blobStore.readlink(ts.ctx, currentPath) + revision, err := ts.blobStore.readlink(ctx, currentPath) if err != nil { switch err.(type) { case storagedriver.PathNotFoundError: - return "", distribution.ErrManifestUnknown{Name: ts.repository.Name(), Tag: tag} + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} } - return "", err + return distribution.Descriptor{}, err } - return revision, nil + return distribution.Descriptor{Digest: revision}, nil } // delete removes the tag from repository, including the history of all // revisions that have the specified tag. -func (ts *tagStore) delete(tag string) error { +func (ts *tagStore) Untag(ctx context.Context, tag string) error { tagPath, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name(), tag: tag, @@ -123,7 +128,7 @@ func (ts *tagStore) delete(tag string) error { return err } - return ts.blobStore.driver.Delete(ts.ctx, tagPath) + return ts.blobStore.driver.Delete(ctx, tagPath) } // linkedBlobStore returns the linkedBlobStore for the named tag, allowing one @@ -145,3 +150,10 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob }}, } } + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. +func (ts *tagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + // An efficient implementation of this will require changes to the S3 driver. 
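+	// Short of that, a workable fallback is to enumerate All(ctx) and
+	// readlink each tag's current link, collecting tags whose digest matches
+	// the given descriptor. PATCH 354 below replaces this stub with exactly
+	// such a scan; roughly (helper name hypothetical):
+	//
+	//	for _, tag := range allTags {
+	//		linked, _ := ts.blobStore.readlink(ctx, currentLinkPath(tag))
+	//		if linked == digest.Digest {
+	//			tags = append(tags, tag)
+	//		}
+	//	}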
+ return make([]string, 0), nil +} diff --git a/docs/storage/tagstore_test.go b/docs/storage/tagstore_test.go new file mode 100644 index 00000000..79660199 --- /dev/null +++ b/docs/storage/tagstore_test.go @@ -0,0 +1,150 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type tagsTestEnv struct { + ts distribution.TagService + ctx context.Context +} + +func testTagStore(t *testing.T) *tagsTestEnv { + ctx := context.Background() + d := inmemory.New() + reg, err := NewRegistry(ctx, d) + if err != nil { + t.Fatal(err) + } + + repo, err := reg.Repository(ctx, "a/b") + if err != nil { + t.Fatal(err) + } + + return &tagsTestEnv{ + ctx: ctx, + ts: repo.Tags(ctx), + } +} + +func TestTagStoreTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + + d := distribution.Descriptor{} + err := tags.Tag(ctx, "latest", d) + if err == nil { + t.Errorf("unexpected error putting malformed descriptor : %s", err) + } + + d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err := tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } + + // Overwrite existing + d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err = tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } +} + +func TestTagStoreUnTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} + + err := tags.Untag(ctx, "latest") + if err == nil { + t.Errorf("Expected error untagging non-existant tag") + } + + err = tags.Tag(ctx, "latest", desc) + if err != nil { + t.Error(err) + } + + err = tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + _, err = tags.Get(ctx, "latest") + if err == nil { + t.Error("Expected error getting untagged tag") + } +} + +func TestTagAll(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + alpha := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(alpha); i++ { + tag := alpha[i] + desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} + err := tagStore.Tag(ctx, string(tag), desc) + if err != nil { + t.Error(err) + } + } + + all, err := tagStore.All(ctx) + if err != nil { + t.Error(err) + } + if len(all) != len(alpha) { + t.Errorf("Unexpected count returned from enumerate") + } + + for i, c := range all { + if c != string(alpha[i]) { + t.Errorf("unexpected tag in enumerate %s", c) + } + } + + removed := "a" + err = tagStore.Untag(ctx, removed) + if err != nil { + t.Error(err) + } + + all, err = tagStore.All(ctx) + if err != nil { + t.Error(err) + } + for _, tag := range all { + if tag == removed { + t.Errorf("unexpected tag in enumerate %s", removed) + } + } + +} From dc6944d91da0a999ac74f2d5408101c790003c1d Mon Sep 17 00:00:00 2001 From: "weiyuan.yl" Date: Mon, 28 Dec 2015 11:28:01 +0800 Subject: [PATCH 348/501] In HEAD request for missing resource, only 404 NOT FOUND is returned Change-Id: I73caf67b59519e6f4f82f7d78f5d4fd4ad9affcd 
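A HEAD response from OSS carries no XML error body, so ossErr.Code is empty
and the old check for "NoSuchKey" never matched: a HEAD on a missing key
surfaced as a generic error rather than storagedriver.PathNotFoundError.
Keying on the 404 status, with either the NoSuchKey code or an empty one,
covers both GET and HEAD. The resulting mapping, pulled out as a helper for
illustration (helper name hypothetical, logic mirrors the patched parseError):

    // keyMissing reports whether an OSS error denotes an absent object:
    // GET failures carry Code == "NoSuchKey", HEAD failures an empty Code.
    func keyMissing(err error) bool {
        ossErr, ok := err.(*oss.Error)
        return ok && ossErr.StatusCode == 404 && (ossErr.Code == "NoSuchKey" || ossErr.Code == "")
    }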
Signed-off-by: weiyuan.yl --- docs/storage/driver/oss/oss.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 4dfe5675..590c6a53 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -754,7 +754,7 @@ func (d *driver) ossPath(path string) string { } func parseError(path string, err error) error { - if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" { + if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == 404 && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { return storagedriver.PathNotFoundError{Path: path} } From 9c7dc47d806a157e01be1c51d11842e69b614669 Mon Sep 17 00:00:00 2001 From: yuzou Date: Thu, 15 Oct 2015 16:56:28 +0800 Subject: [PATCH 349/501] use the scheme and host from x-forward-proto and x-forward-host if they exits and correct the scheme for Location header during image upload Signed-off-by: yuzou --- docs/api/v2/urls.go | 4 +++- docs/api/v2/urls_test.go | 49 ++++++++++++++++++++++++++++++++-------- 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index 42974394..6ba39cc9 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -204,7 +204,9 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { routeURL.Path = routeURL.Path[1:] } - return cr.root.ResolveReference(routeURL), nil + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil } // appendValuesURL appends the parameters to the url. diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index 16e05695..0ad33add 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -166,6 +166,11 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com", }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com", + }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com", @@ -197,15 +202,26 @@ func TestBuilderFromRequest(t *testing.T) { } for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() + buildURL, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } - expectedURL := tr.base + testCase.expectedPath + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = tr.base + testCase.expectedPath + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = urlBase.String() + testCase.expectedPath + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } @@ -229,6 +245,11 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com/prefix/", }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com/prefix/", + }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com/prefix/", @@ -253,15 +274,25 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { } for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() + buildURL, err := 
testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = tr.base[0:len(tr.base)-1] + testCase.expectedPath + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = urlBase.String()[0:len(urlBase.String())-1] + testCase.expectedPath + } - expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } From cf4fdc1be00129df6c5a76b3e8e77b18486afe4b Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 28 Dec 2015 11:04:58 -0800 Subject: [PATCH 350/501] Serve blobs when a storage driver supports redirects but are disabled Fixes issue where an error was returned instead of serving the blob Signed-off-by: Brian Bland --- docs/storage/blobserver.go | 66 +++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/docs/storage/blobserver.go b/docs/storage/blobserver.go index 45f81f53..2655e011 100644 --- a/docs/storage/blobserver.go +++ b/docs/storage/blobserver.go @@ -34,45 +34,45 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h return err } - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - - switch err.(type) { - case nil: - if bs.redirect { + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: // Redirect to storage URL. http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) return err - } - case driver.ErrUnsupportedMethod: - // Fallback to serving the content directly. - br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. return err } - defer br.Close() - - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } - - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } - - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } - - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil } - // Some unexpected error. - return err + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. 
+ w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil } From 165507a6220820658a3486b4d18bbb94e3aa60fc Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 28 Dec 2015 15:22:28 -0800 Subject: [PATCH 351/501] Relaxes filesystem driver permissions to 0777 (dirs) and 0666 (files) Leaves any further permissions restrictions to the process umask Signed-off-by: Brian Bland --- docs/storage/driver/filesystem/driver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index 480bd687..5b495818 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -135,11 +135,11 @@ func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, fullPath := d.fullPath(subPath) parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0755); err != nil { + if err := os.MkdirAll(parentDir, 0777); err != nil { return 0, err } - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { // TODO(stevvooe): A few missing conditions in storage driver: // 1. What if the path is already a directory? From 5dc714b3471b2832841d12884892dfa90ca24fd6 Mon Sep 17 00:00:00 2001 From: "weiyuan.yl" Date: Tue, 29 Dec 2015 12:09:04 +0800 Subject: [PATCH 352/501] Replace 404 to http.StatusNotFound Change-Id: Ia100975cb93c0a6d94ea5542b1c9ce386bc87649 Signed-off-by: weiyuan.yl --- docs/storage/driver/oss/oss.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 590c6a53..67215bc2 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -430,7 +430,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea if offset > 0 { resp, err := d.Bucket.Head(d.ossPath(path), nil) if err != nil { - if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != 404 { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != http.StatusNotFound { return 0, err } } @@ -729,8 +729,8 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int method, ok := options["method"] if ok { methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "PUT") { - return "", storagedriver.ErrUnsupportedMethod{driverName} + if !ok || (methodString != "GET") { + return "", storagedriver.ErrUnsupportedMethod{} } } @@ -754,7 +754,7 @@ func (d *driver) ossPath(path string) string { } func parseError(path string, err error) error { - if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == 404 && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { + if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { return storagedriver.PathNotFoundError{Path: path} } From b89c4e8cbf87cd8881c29df182244a2ad2bbdc20 Mon Sep 17 00:00:00 2001 From: Kenny Leung Date: Tue, 8 Dec 2015 14:24:03 -0800 Subject: [PATCH 353/501] Print error for failed HTTP auth request. 
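Previously a failed token fetch reported only the status line, discarding the
registry's structured error payload. Exporting the client package's error
parsing as HandleErrorResponse lets the auth session append the parsed error
to its failure message, and lets other consumers of docs/client translate
failures the same way. A sketch of the consumer side:

    if !client.SuccessStatus(resp.StatusCode) {
        err := client.HandleErrorResponse(resp)
        // err is the parsed registry error payload when the body is a
        // well-formed error document, or an UnexpectedHTTPResponseError
        // otherwise; either way it is now surfaced instead of dropped.
        return err
    }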
Signed-off-by: Kenny Leung --- docs/client/auth/session.go | 3 ++- docs/client/blob_writer.go | 2 +- docs/client/errors.go | 6 +++++- docs/client/repository.go | 22 +++++++++++----------- 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index 6c92fc34..8594b66f 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -240,7 +240,8 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { - return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := client.HandleErrorResponse(resp) + return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s: %q", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode), err) } decoder := json.NewDecoder(resp.Body) diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index c7eee4e8..21a018dc 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -33,7 +33,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { diff --git a/docs/client/errors.go b/docs/client/errors.go index 7305c021..8e3cb108 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -47,7 +47,11 @@ func parseHTTPErrorResponse(r io.Reader) error { return errors } -func handleErrorResponse(resp *http.Response) error { +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. 
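+// Typical use (sketch, matching the call sites in this package):
+//
+//	if !SuccessStatus(resp.StatusCode) {
+//		return HandleErrorResponse(resp)
+//	}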
+func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { diff --git a/docs/client/repository.go b/docs/client/repository.go index 9d489dd5..758c6e5e 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -91,7 +91,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri returnErr = io.EOF } } else { - return 0, handleErrorResponse(resp) + return 0, HandleErrorResponse(resp) } return numFilled, returnErr @@ -203,7 +203,7 @@ func (t *tags) All(ctx context.Context) ([]string, error) { tags = tagsResponse.Tags return tags, nil } - return tags, handleErrorResponse(resp) + return tags, HandleErrorResponse(resp) } func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { @@ -276,7 +276,7 @@ check: } goto check default: - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } } @@ -315,7 +315,7 @@ func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, erro } else if resp.StatusCode == http.StatusNotFound { return false, nil } - return false, handleErrorResponse(resp) + return false, HandleErrorResponse(resp) } // AddEtagToTag allows a client to supply an eTag to Get which will be @@ -395,7 +395,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } return m, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } // WithTag allows a tag to be passed into Put which enables the client @@ -462,7 +462,7 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . return dgst, nil } - return "", handleErrorResponse(resp) + return "", HandleErrorResponse(resp) } func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { @@ -484,7 +484,7 @@ func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } // todo(richardscothern): Restore interface and implementation with merge of #1050 @@ -541,7 +541,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) }), nil } @@ -597,7 +597,7 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { location: location, }, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -645,7 +645,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -682,7 +682,7 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { From 
fea0a7ed4920e8cf0f150558ce8557242d8ccbe2 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 5 Jan 2016 11:22:40 -0800 Subject: [PATCH 354/501] Remove tags referencing deleted manifests. When a manifest is deleted by digest, look up the referenced tags in the tag store and remove all associations. Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 72 +++++++++++++++++++++++++++++++++++ docs/handlers/images.go | 14 +++++++ docs/storage/tagstore.go | 44 ++++++++++++++++++--- docs/storage/tagstore_test.go | 58 +++++++++++++++++++++++++++- 4 files changed, 181 insertions(+), 7 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 2672b77b..0eb79ec8 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1063,6 +1063,7 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { dgst := args.dgst signedManifest := args.signedManifest manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + // --------------- // Delete by digest resp, err := httpDelete(manifestDigestURL) @@ -1118,6 +1119,77 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { checkErr(t, err, "delting unknown manifest by digest") checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + // -------------------- + // Uupload manifest by tag + tag := signedManifest.Tag + manifestTagURL, err := env.builder.BuildManifestURL(imageName, tag) + resp = putManifest(t, "putting signed manifest by tag", manifestTagURL, signedManifest) + checkResponse(t, "putting signed manifest by tag", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec := json.NewDecoder(resp.Body) + var tagsResponse tagsAPIResponse + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // --------------- + // Delete by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // Ensure that the tag is not listed. 
+ resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 0 { + t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) + } + } type testEnv struct { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index be14b00a..a5bca11d 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -226,5 +226,19 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h } } + tagService := imh.Repository.Tags(imh) + referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + for _, tag := range referencedTags { + if err := tagService.Untag(imh, tag); err != nil { + imh.Errors = append(imh.Errors, err) + return + } + } + w.WriteHeader(http.StatusAccepted) } diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index 167c7fa0..df6e8dfa 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -116,15 +116,19 @@ func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descripto return distribution.Descriptor{Digest: revision}, nil } -// delete removes the tag from repository, including the history of all -// revisions that have the specified tag. +// Untag removes the tag association func (ts *tagStore) Untag(ctx context.Context, tag string) error { tagPath, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name(), tag: tag, }) - if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: return err } @@ -153,7 +157,35 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob // Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by // digest, tag entries which point to it need to be recovered to avoid dangling tags. -func (ts *tagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - // An efficient implementation of this will require changes to the S3 driver. 
- return make([]string, 0), nil +func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil } diff --git a/docs/storage/tagstore_test.go b/docs/storage/tagstore_test.go index 79660199..c257adea 100644 --- a/docs/storage/tagstore_test.go +++ b/docs/storage/tagstore_test.go @@ -102,7 +102,7 @@ func TestTagStoreUnTag(t *testing.T) { } } -func TestTagAll(t *testing.T) { +func TestTagStoreAll(t *testing.T) { env := testTagStore(t) tagStore := env.ts ctx := env.ctx @@ -148,3 +148,59 @@ func TestTagAll(t *testing.T) { } } + +func TestTagLookup(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} + desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} + + tags, err := tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + if len(tags) != 0 { + t.Fatalf("Lookup returned > 0 tags from empty store") + } + + err = tagStore.Tag(ctx, "a", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "b", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "0", desc0) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "1", desc0) + if err != nil { + t.Fatal(err) + } + + tags, err = tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) + } + + tags, err = tagStore.Lookup(ctx, desc0) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) + } + +} From bf1e41a9f286cf93aee7a5a7d2d7c73e45674a9d Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Wed, 6 Jan 2016 11:47:28 +0000 Subject: [PATCH 355/501] GCS driver: fix retry function Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 6 ++--- docs/storage/driver/gcs/gcs_test.go | 40 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 4cef972c..bb291b03 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -318,13 +318,13 @@ func retry(maxTries int, req request) error { backoff := time.Second var err error for i := 0; i < maxTries; i++ { - err := req() + err = req() if err == nil { return nil } - status := err.(*googleapi.Error) - if status == nil || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + status, ok := err.(*googleapi.Error) + if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { return err } diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go index 7afc4e70..4062b2a3 100644 --- a/docs/storage/driver/gcs/gcs_test.go +++ 
b/docs/storage/driver/gcs/gcs_test.go @@ -3,10 +3,13 @@ package gcs import ( + "fmt" "io/ioutil" "os" "testing" + "google.golang.org/api/googleapi" + ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" @@ -55,6 +58,43 @@ func init() { }, skipGCS) } +func TestRetry(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + assertError := func(expected string, observed error) { + observedMsg := "" + if observed != nil { + observedMsg = observed.Error() + } + if observedMsg != expected { + t.Fatalf("expected %v, observed %v\n", expected, observedMsg) + } + } + + err := retry(2, func() error { + return &googleapi.Error{ + Code: 503, + Message: "google api error", + } + }) + assertError("googleapi: Error 503: google api error", err) + + err = retry(2, func() error { + return &googleapi.Error{ + Code: 404, + Message: "google api error", + } + }) + assertError("googleapi: Error 404: google api error", err) + + err = retry(2, func() error { + return fmt.Errorf("error") + }) + assertError("error", err) +} + func TestEmptyRootList(t *testing.T) { if skipGCS() != "" { t.Skip(skipGCS()) From 5c6fdc710f8680701720c98e7ebdcd4ab589f703 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Wed, 6 Jan 2016 18:17:17 +0000 Subject: [PATCH 356/501] GCS Storagedriver: fix test failure caused by #1187 Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 4cef972c..2b190dec 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -461,6 +461,11 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { break } } + if path != "/" && len(list) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in Google Cloud Storage. + return nil, storagedriver.PathNotFoundError{Path: path} + } return list, nil } From 9c13a8295f4f2af968bc3e30d5acbc6b657b6141 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 14 Dec 2015 18:19:34 -0800 Subject: [PATCH 357/501] Factor out schema-specific portions of manifestStore Create signedManifestHandler and schema2ManifestHandler. Use these to unmarshal and put the respective types of manifests from manifestStore. Signed-off-by: Aaron Lehmann --- docs/storage/manifeststore.go | 196 +++++++------------------ docs/storage/registry.go | 46 +++--- docs/storage/schema2manifesthandler.go | 100 +++++++++++++ docs/storage/signedmanifesthandler.go | 150 +++++++++++++++++++ 4 files changed, 328 insertions(+), 164 deletions(-) create mode 100644 docs/storage/schema2manifesthandler.go create mode 100644 docs/storage/signedmanifesthandler.go diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 73061592..cd3aa43e 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -1,24 +1,51 @@ package storage import ( - "encoding/json" "fmt" + "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" + "github.com/docker/distribution/manifest/schema2" ) -// manifestStore is a storage driver based store for storing schema1 manifests. 
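+// Dispatch keys off the schemaVersion envelope shared by both formats: the
+// store decodes only the manifest.Versioned header, then routes to the
+// matching handler, roughly:
+//
+//	var versioned manifest.Versioned // {"schemaVersion": 1 or 2, ...}
+//	if err := json.Unmarshal(content, &versioned); err != nil { ... }
+//	switch versioned.SchemaVersion {
+//	case 1: // signedManifestHandler (schema1, JWS-signed)
+//	case 2: // schema2ManifestHandler
+//	}
+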
+// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. + Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + type manifestStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context - signatures *signatureStore + repository *repository + blobStore *linkedBlobStore + ctx context.Context + skipDependencyVerification bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler } var _ distribution.ManifestService = &manifestStore{} @@ -40,18 +67,6 @@ func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - // Ensure that this revision is available in this repository. - _, err := ms.blobStore.Stat(ctx, dgst) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: ms.repository.Name(), - Revision: dgst, - } - } - - return nil, err - } // TODO(stevvooe): Need to check descriptor from above to ensure that the // mediatype is as we expect for the manifest store. @@ -68,84 +83,32 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. return nil, err } - // Fetch the signatures for the manifest - signatures, err := ms.signatures.Get(dgst) - if err != nil { + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { return nil, err } - jsig, err := libtrust.NewJSONSignature(content, signatures...) 
- if err != nil { - return nil, err + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) } - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - - return &sm, nil -} - -// SkipLayerVerification allows a manifest to be Put before its -// layers are on the filesystem -func SkipLayerVerification() distribution.ManifestServiceOption { - return skipLayerOption{} -} - -type skipLayerOption struct{} - -func (o skipLayerOption) Apply(m distribution.ManifestService) error { - if ms, ok := m.(*manifestStore); ok { - ms.skipDependencyVerification = true - return nil - } - return fmt.Errorf("skip layer verification only valid for manifestStore") + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) } func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - sm, ok := manifest.(*schema1.SignedManifest) - if !ok { - return "", fmt.Errorf("non-v1 manifest put to signed manifestStore: %T", manifest) + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) } - if err := ms.verifyManifest(ms.ctx, *sm); err != nil { - return "", err - } - - mt := schema1.MediaTypeManifest - payload := sm.Canonical - - revision, err := ms.blobStore.Put(ctx, mt, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return "", err - } - - // Link the revision into the repository. - if err := ms.blobStore.linkBlob(ctx, revision); err != nil { - return "", err - } - - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return "", err - } - - if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { - return "", err - } - - return revision.Digest, nil + return "", fmt.Errorf("unrecognized manifest type %T", manifest) } // Delete removes the revision of the specified manfiest. @@ -157,64 +120,3 @@ func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { return 0, distribution.ErrUnsupported } - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumems. 
-func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest) error { - var errs distribution.ErrManifestVerification - - if len(mnfst.Name) > reference.NameTotalLengthMax { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), - }) - } - - if !reference.NameRegexp.MatchString(mnfst.Name) { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("invalid manifest name format"), - }) - } - - if len(mnfst.History) != len(mnfst.FSLayers) { - errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", - len(mnfst.History), len(mnfst.FSLayers))) - } - - if _, err := schema1.Verify(&mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } - } - - if !ms.skipDependencyVerification { - for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob erroms. - errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil -} diff --git a/docs/storage/registry.go b/docs/storage/registry.go index c58b91d8..d22c6c81 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -165,28 +165,40 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M blobLinkPath, } + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + }, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + } + ms := &manifestStore{ ctx: ctx, repository: repo, - blobStore: &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - }, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. 
- linkPathFns: manifestLinkPathFns, - }, - signatures: &signatureStore{ + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ ctx: ctx, repository: repo, - blobStore: repo.blobStore, + blobStore: blobStore, + signatures: &signatureStore{ + ctx: ctx, + repository: repo, + blobStore: repo.blobStore, + }, + }, + schema2Handler: &schema2ManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, }, } diff --git a/docs/storage/schema2manifesthandler.go b/docs/storage/schema2manifesthandler.go new file mode 100644 index 00000000..9cec2e81 --- /dev/null +++ b/docs/storage/schema2manifesthandler.go @@ -0,0 +1,100 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. +type schema2ManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumems. +func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + target := mnfst.Target() + _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) + } + + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/docs/storage/signedmanifesthandler.go b/docs/storage/signedmanifesthandler.go new file mode 100644 index 00000000..a375516a --- /dev/null +++ b/docs/storage/signedmanifesthandler.go @@ -0,0 +1,150 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + signatures *signatureStore +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + // Fetch the signatures for the manifest + signatures, err := ms.signatures.Get(dgst) + if err != nil { + return nil, err + } + + jsig, err := libtrust.NewJSONSignature(content, signatures...) + if err != nil { + return nil, err + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + // Grab each json signature and store them. + signatures, err := sm.Signatures() + if err != nil { + return "", err + } + + if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumems. 
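The verifyManifest that follows applies the same dependency check as the schema2 handler above: stat every referenced blob and record the missing ones. The shared pattern, reduced to a sketch (checkReferences is an illustrative name; the distribution and context imports of this package are assumed):

// checkReferences stats each descriptor a manifest references and collects
// an ErrManifestBlobUnknown for every blob the repository cannot see.
func checkReferences(ctx context.Context, blobs distribution.BlobStatter, refs []distribution.Descriptor) error {
	var errs distribution.ErrManifestVerification
	for _, ref := range refs {
		if _, err := blobs.Stat(ctx, ref.Digest); err != nil {
			if err != distribution.ErrBlobUnknown {
				errs = append(errs, err)
			}
			// On any failure, report the dependency as an unknown blob.
			errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: ref.Digest})
		}
	}
	if len(errs) != 0 {
		return errs
	}
	return nil
}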
+func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} From f14c6a4814bef8e3510e68fb909caf0f293294c2 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 15 Dec 2015 11:37:28 -0800 Subject: [PATCH 358/501] Recognize clients that don't support schema2, and convert manifests to schema1 on the fly Signed-off-by: Aaron Lehmann --- docs/handlers/app.go | 17 ++++++++++++++-- docs/handlers/images.go | 45 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 4 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 63a3ac29..8ea89da8 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -30,6 +30,7 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/libtrust" "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" "golang.org/x/net/context" @@ -67,10 +68,15 @@ type App struct { redis *redis.Pool - // true if this registry is configured as a pull through cache + // trustKey is a deprecated key used to sign manifests converted to + // schema1 for backward compatibility. It should not be used for any + // other purposes. + trustKey libtrust.PrivateKey + + // isCache is true if this registry is configured as a pull through cache isCache bool - // true if the registry is in a read-only maintenance mode + // readOnly is true if the registry is in a read-only maintenance mode readOnly bool } @@ -139,6 +145,13 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap app.configureRedis(configuration) app.configureLogHook(configuration) + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. 
+ app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + if configuration.HTTP.Host != "" { u, err := url.Parse(configuration.HTTP.Host) if err != nil { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index be14b00a..be586602 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -8,6 +8,8 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" @@ -51,8 +53,6 @@ type imageManifestHandler struct { } // GetImageManifest fetches the image manifest from the storage backend, if it exists. -// todo(richardscothern): this assumes v2 schema 1 manifests for now but in the future -// get the version from the Accept HTTP header func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") manifests, err := imh.Repository.Manifests(imh) @@ -83,6 +83,47 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } + // Only rewrite schema2 manifests when they are being fetched by tag. + // If they are being fetched by digest, we can't return something not + // matching the digest. + if _, isSchema2 := manifest.(*schema2.DeserializedManifest); imh.Tag != "" && isSchema2 { + supportsSchema2 := false + if acceptHeaders, ok := r.Header["Accept"]; ok { + for _, mediaType := range acceptHeaders { + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + break + } + } + } + + if !supportsSchema2 { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + targetDescriptor := manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, imh.Repository.Name(), imh.Tag, configJSON) + for _, d := range manifest.References() { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return + } + } + manifest, err = builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return + } + } + } + ct, p, err := manifest.Payload() if err != nil { return From 66a33baa36ed82f7412e01d2a996c3cd73ba3a9c Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 16 Dec 2015 14:30:49 -0800 Subject: [PATCH 359/501] Add API unit testing for schema2 manifest Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 423 ++++++++++++++++++++++++++++++++++---- docs/handlers/images.go | 4 +- 2 files changed, 383 insertions(+), 44 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 2672b77b..e38b4da8 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -18,11 +18,13 @@ import ( "strings" "testing" + "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" 
"github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -690,48 +692,40 @@ func httpDelete(url string) (*http.Response, error) { } type manifestArgs struct { - imageName string - signedManifest *schema1.SignedManifest - dgst digest.Digest -} - -func makeManifestArgs(t *testing.T) manifestArgs { - args := manifestArgs{ - imageName: "foo/bar", - } - - return args + imageName string + mediaType string + manifest distribution.Manifest + dgst digest.Digest } func TestManifestAPI(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - testManifestAPI(t, env, args) + testManifestAPISchema1(t, env, "foo/schema1") + testManifestAPISchema2(t, env, "foo/schema2") deleteEnabled = true env = newTestEnv(t, deleteEnabled) - args = makeManifestArgs(t) - testManifestAPI(t, env, args) + testManifestAPISchema1(t, env, "foo/schema1") + testManifestAPISchema2(t, env, "foo/schema2") } func TestManifestDelete(t *testing.T) { deleteEnabled := true env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - env, args = testManifestAPI(t, env, args) - testManifestDelete(t, env, args) + schema1Args := testManifestAPISchema1(t, env, "foo/schema1") + testManifestDelete(t, env, schema1Args) + schema2Args := testManifestAPISchema2(t, env, "foo/schema2") + testManifestDelete(t, env, schema2Args) } func TestManifestDeleteDisabled(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - testManifestDeleteDisabled(t, env, args) + testManifestDeleteDisabled(t, env, "foo/schema1") } -func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) *testEnv { - imageName := args.imageName +func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName string) { manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) @@ -744,12 +738,11 @@ func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) * defer resp.Body.Close() checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) - return nil } -func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, manifestArgs) { - imageName := args.imageName +func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manifestArgs { tag := "thetag" + args := manifestArgs{imageName: imageName} manifestURL, err := env.builder.BuildManifestURL(imageName, tag) if err != nil { @@ -808,10 +801,10 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m }, } - resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) defer resp.Body.Close() checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestInvalid) + _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestInvalid: 1, @@ -827,7 +820,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args 
manifestArgs) (*testEnv, m t.Fatalf("error signing manifest: %v", err) } - resp = putManifest(t, "putting signed manifest with errors", manifestURL, sm) + resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) defer resp.Body.Close() checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, @@ -872,13 +865,13 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m } dgst := digest.FromBytes(signedManifest.Canonical) - args.signedManifest = signedManifest + args.manifest = signedManifest args.dgst = dgst manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) checkErr(t, err, "building manifest url") - resp = putManifest(t, "putting signed manifest no error", manifestURL, signedManifest) + resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, @@ -887,7 +880,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m // -------------------- // Push by digest -- should get same result - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, @@ -958,7 +951,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m } - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, sm2) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp, err = http.Get(manifestDigestURL) @@ -1020,8 +1013,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m } defer resp.Body.Close() - // Check that we get an unknown repository error when asking for tags - checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + checkResponse(t, "getting tags", resp, http.StatusOK) dec = json.NewDecoder(resp.Body) var tagsResponse tagsAPIResponse @@ -1052,16 +1044,359 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("error signing manifest") } - resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, invalidSigned) + resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) - return env, args + return args +} + +func testManifestAPISchema2(t *testing.T, env *testEnv, imageName string) manifestArgs { + tag := "schema2tag" + args := manifestArgs{ + imageName: imageName, + mediaType: schema2.MediaTypeManifest, + } + + manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent 
manifest", resp, v2.ErrorCodeManifestUnknown) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) + + // -------------------------------- + // Attempt to push manifest with missing config and missing layers + manifest := &schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + }, + MediaType: schema2.MediaTypeManifest, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeLayer, + }, + { + Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", + Size: 6863, + MediaType: schema2.MediaTypeLayer, + }, + }, + } + + resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing config manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 3, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push a config, and reference it in the manifest + sampleConfig := []byte(`{ + "architecture": "amd64", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + } + ], + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + } + }`) + sampleConfigDigest := digest.FromBytes(sampleConfig) + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) + manifest.Config.Digest = sampleConfigDigest + manifest.Config.Size = int64(len(sampleConfig)) + + // The manifest should still be invalid, because its layer doesnt exist + resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // Push 2 
random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range manifest.Layers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + manifest.Layers[i].Digest = dgst + + uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the manifest with all layers pushed. + deserializedManifest, err := schema2.FromStruct(*manifest) + if err != nil { + t.Fatalf("could not create DeserializedManifest: %v", err) + } + _, canonical, err := deserializedManifest.Payload() + if err != nil { + t.Fatalf("could not get manifest payload: %v", err) + } + dgst := digest.FromBytes(canonical) + args.dgst = dgst + args.manifest = deserializedManifest + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema2.DeserializedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema2.DeserializedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) 
+ } + + _, fetchedCanonical, err = fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest as schema1: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedSchema1Manifest schema1.SignedManifest + dec = json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedSchema1Manifest); err != nil { + t.Fatalf("error decoding fetched schema1 manifest: %v", err) + } + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + for i := range manifest.Layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields becuase we're using randomly-generated + // layers. 
+ + return args } func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { imageName := args.imageName dgst := args.dgst - signedManifest := args.signedManifest + manifest := args.manifest manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) // --------------- // Delete by digest @@ -1090,8 +1425,8 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { // -------------------- // Re-upload manifest by digest - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) + checkResponse(t, "putting manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -1183,7 +1518,7 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te } } -func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { +func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { var body []byte if sm, ok := v.(*schema1.SignedManifest); ok { @@ -1205,6 +1540,10 @@ func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { t.Fatalf("error creating request for %s: %v", msg, err) } + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatalf("error doing put request while %s: %v", msg, err) @@ -1532,7 +1871,7 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) location, err := env.builder.BuildManifestURL(imageName, dgst.String()) checkErr(t, err, "building location URL") - resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{location}, @@ -1570,7 +1909,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { t.Fatalf("error signing manifest: %v", err) } - resp := putManifest(t, "putting unsigned manifest", manifestURL, sm) + resp := putManifest(t, "putting unsigned manifest", manifestURL, "", sm) checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Manifest Delete diff --git a/docs/handlers/images.go b/docs/handlers/images.go index be586602..6c8a16fb 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -86,7 +86,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http // Only rewrite schema2 manifests when they are being fetched by tag. // If they are being fetched by digest, we can't return something not // matching the digest. 
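The rewrite decision in the hunk below hinges on the Accept-header scan visible in its context lines. Reduced to a sketch (supportsMediaType is an illustrative helper, not in the patch):

// supportsMediaType reports whether the client sent an Accept header naming
// the given media type. As in the handler, each header value is compared
// whole, so a combined "Accept: a, b" line would not match either type.
func supportsMediaType(r *http.Request, mediaType string) bool {
	for _, accept := range r.Header["Accept"] {
		if accept == mediaType {
			return true
		}
	}
	return false
}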
- if _, isSchema2 := manifest.(*schema2.DeserializedManifest); imh.Tag != "" && isSchema2 { + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); imh.Tag != "" && isSchema2 { supportsSchema2 := false if acceptHeaders, ok := r.Header["Accept"]; ok { for _, mediaType := range acceptHeaders { @@ -101,7 +101,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http // Rewrite manifest in schema1 format ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) - targetDescriptor := manifest.Target() + targetDescriptor := schema2Manifest.Target() blobs := imh.Repository.Blobs(imh) configJSON, err := blobs.Get(imh, targetDescriptor.Digest) if err != nil { From 7ef71988a8e3c4fb51041ac813c00b46bb706016 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 16 Dec 2015 17:26:13 -0800 Subject: [PATCH 360/501] Add support for manifest list ("fat manifest") Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 193 ++++++++++++++++++++++++- docs/storage/manifestlisthandler.go | 96 ++++++++++++ docs/storage/manifeststore.go | 24 ++- docs/storage/registry.go | 5 + docs/storage/schema2manifesthandler.go | 5 +- docs/storage/signedmanifesthandler.go | 2 +- 6 files changed, 313 insertions(+), 12 deletions(-) create mode 100644 docs/storage/manifestlisthandler.go diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index e38b4da8..0393c8f1 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -23,6 +23,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/registry/api/errcode" @@ -702,12 +703,14 @@ func TestManifestAPI(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) testManifestAPISchema1(t, env, "foo/schema1") - testManifestAPISchema2(t, env, "foo/schema2") + schema2Args := testManifestAPISchema2(t, env, "foo/schema2") + testManifestAPIManifestList(t, env, schema2Args) deleteEnabled = true env = newTestEnv(t, deleteEnabled) testManifestAPISchema1(t, env, "foo/schema1") - testManifestAPISchema2(t, env, "foo/schema2") + schema2Args = testManifestAPISchema2(t, env, "foo/schema2") + testManifestAPIManifestList(t, env, schema2Args) } func TestManifestDelete(t *testing.T) { @@ -1393,6 +1396,179 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName string) manife return args } +func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + tag := "manifestlisttag" + + manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // -------------------------------- + // Attempt to push manifest list that refers to an unknown manifest + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + }, + MediaType: manifestlist.MediaTypeManifestList, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "amd64", + OS: "linux", + }, + }, + }, + } + + resp := 
putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) + defer resp.Body.Close() + checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // ------------------- + // Push a manifest list that references an actual manifest + manifestList.Manifests[0].Digest = args.dgst + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + t.Fatalf("could not create DeserializedManifestList: %v", err) + } + _, canonical, err := deserializedManifestList.Payload() + if err != nil { + t.Fatalf("could not get manifest list payload: %v", err) + } + dgst := digest.FromBytes(canonical) + + manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + req.Header.Add("Accept", schema1.MediaTypeManifest) + req.Header.Add("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest list: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestList manifestlist.DeserializedManifestList + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifestList); err != nil { + t.Fatalf("error decoding fetched manifest list: %v", err) + } + + _, fetchedCanonical, err := fetchedManifestList.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifest lists do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest list by digest") + defer 
resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestListByDigest manifestlist.DeserializedManifestList + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestListByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) +} + func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { imageName := args.imageName dgst := args.dgst @@ -1521,13 +1697,20 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { var body []byte - if sm, ok := v.(*schema1.SignedManifest); ok { - _, pl, err := sm.Payload() + switch m := v.(type) { + case *schema1.SignedManifest: + _, pl, err := m.Payload() if err != nil { t.Fatalf("error getting payload: %v", err) } body = pl - } else { + case *manifestlist.DeserializedManifestList: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + default: var err error body, err = json.MarshalIndent(v, "", " ") if err != nil { diff --git a/docs/storage/manifestlisthandler.go b/docs/storage/manifestlisthandler.go new file mode 100644 index 00000000..42027d13 --- /dev/null +++ b/docs/storage/manifestlisthandler.go @@ -0,0 +1,96 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. 
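Unlike the image-manifest handlers, which stat blobs, the handler defined below verifies its references through the repository's manifest service, since a manifest list points at other manifests rather than layers. That check, reduced to a sketch (checkListReferences is an illustrative name):

// checkListReferences confirms each manifest referenced by a list is already
// present in the repository, mirroring (*manifestListHandler).verifyManifest.
func checkListReferences(ctx context.Context, manifests distribution.ManifestService, refs []distribution.Descriptor) error {
	var errs distribution.ErrManifestVerification
	for _, ref := range refs {
		exists, err := manifests.Exists(ctx, ref.Digest)
		if err != nil && err != distribution.ErrBlobUnknown {
			errs = append(errs, err)
		}
		if err != nil || !exists {
			// Missing or unreadable references are reported as unknown.
			errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: ref.Digest})
		}
	}
	if len(errs) != 0 {
		return errs
	}
	return nil
}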
+type manifestListHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index cd3aa43e..cd01670b 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" ) @@ -44,8 +45,9 @@ type manifestStore struct { skipDependencyVerification bool - schema1Handler ManifestHandler - schema2Handler ManifestHandler + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler } var _ distribution.ManifestService = &manifestStore{} @@ -92,7 +94,21 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. 
case 1: return ms.schema1Handler.Unmarshal(ctx, dgst, content) case 2: - return ms.schema2Handler.Unmarshal(ctx, dgst, content) + // This can be an image manifest or a manifest list + var mediaType struct { + MediaType string `json:"mediaType"` + } + if err = json.Unmarshal(content, &mediaType); err != nil { + return nil, err + } + switch mediaType.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + default: + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", mediaType.MediaType)} + } } return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) @@ -106,6 +122,8 @@ func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) case *schema2.DeserializedManifest: return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) } return "", fmt.Errorf("unrecognized manifest type %T", manifest) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index d22c6c81..b3810676 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -200,6 +200,11 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M repository: repo, blobStore: blobStore, }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + }, } // Apply options diff --git a/docs/storage/schema2manifesthandler.go b/docs/storage/schema2manifesthandler.go index 9cec2e81..115786e2 100644 --- a/docs/storage/schema2manifesthandler.go +++ b/docs/storage/schema2manifesthandler.go @@ -62,9 +62,8 @@ func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution } // verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumems. +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification diff --git a/docs/storage/signedmanifesthandler.go b/docs/storage/signedmanifesthandler.go index a375516a..02663226 100644 --- a/docs/storage/signedmanifesthandler.go +++ b/docs/storage/signedmanifesthandler.go @@ -91,7 +91,7 @@ func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution. // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumems. +// content, leaving trust policies of that content up to consumers. 
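Earlier in this patch, manifestStore.Get gained a second sniffing step: within schemaVersion 2, the mediaType field distinguishes an image manifest from a manifest list. A sketch of the combined dispatch (sniffManifestKind is an illustrative name; the media-type constants and json/fmt imports are assumed from the schema2 and manifestlist packages):

// sniffManifestKind mirrors the two-step content sniff in manifestStore.Get:
// schema version first, then mediaType for version 2 payloads.
func sniffManifestKind(content []byte) (string, error) {
	var v struct {
		SchemaVersion int    `json:"schemaVersion"`
		MediaType     string `json:"mediaType"`
	}
	if err := json.Unmarshal(content, &v); err != nil {
		return "", err
	}
	switch v.SchemaVersion {
	case 1:
		return "schema1", nil
	case 2:
		switch v.MediaType {
		case schema2.MediaTypeManifest:
			return "schema2", nil
		case manifestlist.MediaTypeManifestList:
			return "manifestlist", nil
		}
		return "", fmt.Errorf("unrecognized manifest content type %s", v.MediaType)
	}
	return "", fmt.Errorf("unrecognized manifest schema version %d", v.SchemaVersion)
}

A later patch in this series moves MediaType into manifest.Versioned, so a single decode of that struct replaces this two-step sniff.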
func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification From fce65b72b3d5a11b413121344b964b15ede1f4c0 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 17 Dec 2015 17:32:11 -0800 Subject: [PATCH 361/501] Recognize clients that don't support manifest lists Convert a default platform's manifest to schema1 on the fly. Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 49 +++++++++++++++++ docs/handlers/images.go | 107 ++++++++++++++++++++++++++++---------- 2 files changed, 129 insertions(+), 27 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 0393c8f1..50a8cb47 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1567,6 +1567,55 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) } checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedSchema1Manifest schema1.SignedManifest + dec = json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedSchema1Manifest); err != nil { + t.Fatalf("error decoding fetched schema1 manifest: %v", err) + } + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + layers := args.manifest.(*schema2.DeserializedManifest).Layers + for i := range layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields becuase we're using randomly-generated + // layers. } func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 6c8a16fb..240bbffe 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/registry/api/errcode" @@ -15,6 +16,13 @@ import ( "github.com/gorilla/handlers" ) +// These constants determine which architecture and OS to choose from a +// manifest list when downconverting it to a schema1 manifest. +const ( + defaultArch = "amd64" + defaultOS = "linux" +) + // imageManifestDispatcher takes the request context and builds the // appropriate handler for handling image manifest requests. 
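When an old client pulls a manifest list by tag, the handler below picks the entry matching the default platform and down-converts that image manifest to schema1. The selection step, reduced to a sketch (pickDefaultPlatform is an illustrative name; defaultArch and defaultOS are the constants added above):

// pickDefaultPlatform returns the digest of the manifest-list entry matching
// the default platform, as GetImageManifest does before down-converting.
func pickDefaultPlatform(ml *manifestlist.DeserializedManifestList) (digest.Digest, bool) {
	for _, desc := range ml.Manifests {
		if desc.Platform.Architecture == defaultArch && desc.Platform.OS == defaultOS {
			return desc.Digest, true
		}
	}
	return "", false
}

If no entry matches, the handler responds with ErrorCodeManifestUnknown rather than guessing a platform.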
func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { @@ -83,42 +91,62 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } + supportsSchema2 := false + supportsManifestList := false + if acceptHeaders, ok := r.Header["Accept"]; ok { + for _, mediaType := range acceptHeaders { + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + } + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + // Only rewrite schema2 manifests when they are being fetched by tag. // If they are being fetched by digest, we can't return something not // matching the digest. - if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); imh.Tag != "" && isSchema2 { - supportsSchema2 := false - if acceptHeaders, ok := r.Header["Accept"]; ok { - for _, mediaType := range acceptHeaders { - if mediaType == schema2.MediaTypeManifest { - supportsSchema2 = true - break - } + if imh.Tag != "" && isSchema2 && !supportsSchema2 { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else if imh.Tag != "" && isManifestList && !supportsManifestList { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break } } - if !supportsSchema2 { - // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } - targetDescriptor := schema2Manifest.Target() - blobs := imh.Repository.Blobs(imh) - configJSON, err := blobs.Get(imh, targetDescriptor.Digest) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return - } + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } - builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, imh.Repository.Name(), imh.Tag, configJSON) - for _, d := range manifest.References() { - if err := builder.AppendReference(d); err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return - } - } - manifest, err = builder.Build(imh) + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return } } @@ -136,6 +164,31 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http w.Write(p) } +func (imh 
*imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, imh.Repository.Name(), imh.Tag, configJSON) + for _, d := range schema2Manifest.References() { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + } + manifest, err := builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + return manifest, nil +} + func etagMatch(r *http.Request, etag string) bool { for _, headerVal := range r.Header["If-None-Match"] { if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted From bbabb55ccbb9ef46c234b517785a91601543d88e Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 6 Jan 2016 14:15:14 -0800 Subject: [PATCH 362/501] Move MediaType into manifest.Versioned This makes content type sniffing cleaner. The document just needs to be decoded into a manifest.Versioned structure. It's no longer a two-step process. Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 4 ++-- docs/storage/manifeststore.go | 10 ++-------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 50a8cb47..8195f47b 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1096,8 +1096,8 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName string) manife manifest := &schema2.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, }, - MediaType: schema2.MediaTypeManifest, Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 3253, @@ -1410,8 +1410,8 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) manifestList := &manifestlist.ManifestList{ Versioned: manifest.Versioned{ SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, }, - MediaType: manifestlist.MediaTypeManifestList, Manifests: []manifestlist.ManifestDescriptor{ { Descriptor: distribution.Descriptor{ diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index cd01670b..31daa83c 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -95,19 +95,13 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. 
return ms.schema1Handler.Unmarshal(ctx, dgst, content) case 2: // This can be an image manifest or a manifest list - var mediaType struct { - MediaType string `json:"mediaType"` - } - if err = json.Unmarshal(content, &mediaType); err != nil { - return nil, err - } - switch mediaType.MediaType { + switch versioned.MediaType { case schema2.MediaTypeManifest: return ms.schema2Handler.Unmarshal(ctx, dgst, content) case manifestlist.MediaTypeManifestList: return ms.manifestListHandler.Unmarshal(ctx, dgst, content) default: - return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", mediaType.MediaType)} + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} } } From 41e30f626b4fe92085f77cdb31ff10f2dc3dcbcc Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 14 Dec 2015 18:34:18 -0800 Subject: [PATCH 363/501] Adds cross-repository blob mounting behavior Extends blob upload POST endpoint to support mount and from query parameters as described in #634 Signed-off-by: Brian Bland --- docs/api/v2/descriptors.go | 64 +++++++++++++ docs/client/repository.go | 56 +++++++++++ docs/client/repository_test.go | 55 +++++++++++ docs/handlers/app.go | 5 + docs/handlers/blobupload.go | 62 +++++++++++-- docs/proxy/proxyblobstore.go | 4 + docs/proxy/proxyblobstore_test.go | 8 ++ docs/storage/blob_test.go | 148 ++++++++++++++++++++++++++++++ docs/storage/linkedblobstore.go | 23 +++++ docs/storage/registry.go | 1 + 10 files changed, 416 insertions(+), 10 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 52c725dc..ad3da3ef 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -1041,6 +1041,70 @@ var routeDescriptors = []RouteDescriptor{ deniedResponseDescriptor, }, }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, }, }, }, diff --git a/docs/client/repository.go b/docs/client/repository.go index 758c6e5e..8f30b4f1 100644 --- a/docs/client/repository.go +++ 
b/docs/client/repository.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "strconv" + "sync" "time" "github.com/docker/distribution" @@ -499,6 +500,9 @@ type blobs struct { statter distribution.BlobDescriptorService distribution.BlobDeleter + + cacheLock sync.Mutex + cachedBlobUpload distribution.BlobWriter } func sanitizeLocation(location, base string) (string, error) { @@ -573,7 +577,20 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut } func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { + bs.cacheLock.Lock() + if bs.cachedBlobUpload != nil { + upload := bs.cachedBlobUpload + bs.cachedBlobUpload = nil + bs.cacheLock.Unlock() + + return upload, nil + } + bs.cacheLock.Unlock() + u, err := bs.ub.BuildBlobUploadURL(bs.name) + if err != nil { + return nil, err + } resp, err := bs.client.Post(u, "", nil) if err != nil { @@ -604,6 +621,45 @@ func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter panic("not implemented") } +func (bs *blobs) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { + u, err := bs.ub.BuildBlobUploadURL(bs.name, url.Values{"from": {sourceRepo}, "mount": {dgst.String()}}) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + return bs.Stat(ctx, dgst) + case http.StatusAccepted: + // Triggered a blob upload (legacy behavior), so cache the creation info + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return distribution.Descriptor{}, err + } + + bs.cacheLock.Lock() + bs.cachedBlobUpload = &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + } + bs.cacheLock.Unlock() + + return distribution.Descriptor{}, HandleErrorResponse(resp) + default: + return distribution.Descriptor{}, HandleErrorResponse(resp) + } +} + func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { return bs.statter.Clear(ctx, dgst) } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index c1032ec1..8a7fc1c9 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -466,6 +466,61 @@ func TestBlobUploadMonolithic(t *testing.T) { } } +func TestBlobMount(t *testing.T) { + dgst, content := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo := "test.example.com/uploadrepo" + sourceRepo := "test.example.com/sourcerepo" + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo}, "mount": {dgst.String()}}, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo + "/blobs/" + dgst.String()}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * 
time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + l := r.Blobs(ctx) + + stat, err := l.Mount(ctx, sourceRepo, dgst) + if err != nil { + t.Fatal(err) + } + + if stat.Digest != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, dgst) + } +} + func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { blobs := make([]schema1.FSLayer, blobCount) history := make([]schema1.History, blobCount) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 8ea89da8..23225493 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -710,6 +710,11 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont if repo != "" { accessRecords = appendAccessRecords(accessRecords, r.Method, repo) + if fromRepo := r.FormValue("from"); fromRepo != "" { + // mounting a blob from one repository to another requires pull (GET) + // access to the source repository. + accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) + } } else { // Only allow the name not to be set on the base route. if app.nameRequired(r) { diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 1bd33d33..c5638c83 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -116,8 +116,16 @@ type blobUploadHandler struct { } // StartBlobUpload begins the blob upload process and allocates a server-side -// blob writer session. +// blob writer session, optionally mounting the blob from a separate repository. func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + fromRepo := r.FormValue("from") + mountDigest := r.FormValue("mount") + + if mountDigest != "" && fromRepo != "" { + buh.mountBlob(w, fromRepo, mountDigest) + return + } + blobs := buh.Repository.Blobs(buh) upload, err := blobs.Create(buh) @@ -254,18 +262,10 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - - // Build our canonical blob url - blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) - if err != nil { + if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } - - w.Header().Set("Location", blobURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - w.WriteHeader(http.StatusCreated) } // CancelBlobUpload cancels an in-progress upload of a blob. @@ -335,3 +335,45 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. return nil } + +// mountBlob attempts to mount a blob from another repository by its digest. If +// successful, the blob is linked into the blob store and 201 Created is +// returned with the canonical url of the blob. 
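On the wire, the mount attempt handled below is a single POST to the upload endpoint carrying the two new query parameters. A sketch of the exchange from the client's point of view, using plain net/http and a hypothetical registryBase (the real client goes through the URL builder and an authenticating transport):

    package main

    import (
        "fmt"
        "net/http"
    )

    // tryMount posts to /v2/<repo>/blobs/uploads/ with mount and from set.
    // 201 Created means the blob was linked without an upload; 202 Accepted
    // means the registry fell back to a regular upload session.
    func tryMount(registryBase, repo, sourceRepo, dgst string) (bool, error) {
        u := fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s", registryBase, repo, dgst, sourceRepo)
        resp, err := http.Post(u, "", nil)
        if err != nil {
            return false, err
        }
        defer resp.Body.Close()
        return resp.StatusCode == http.StatusCreated, nil
    }

    func main() {
        mounted, err := tryMount("http://localhost:5000", "foo/bar", "foo/source", "sha256:deadbeef")
        fmt.Println(mounted, err)
    }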
+func (buh *blobUploadHandler) mountBlob(w http.ResponseWriter, fromRepo, mountDigest string) { + dgst, err := digest.ParseDigest(mountDigest) + if err != nil { + buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + return + } + + blobs := buh.Repository.Blobs(buh) + desc, err := blobs.Mount(buh, fromRepo, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(dgst)) + } else { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } +} + +// writeBlobCreatedHeaders writes the standard headers describing a newly +// created blob. A 201 Created is written as well as the canonical URL and +// blob digest. +func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { + blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) + if err != nil { + return err + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) + return nil +} diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index 976dc8d7..ca39f9f8 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -169,6 +169,10 @@ func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution. return nil, distribution.ErrUnsupported } +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported +} + func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { return nil, distribution.ErrUnsupported } diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index eb623197..5c5015a0 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -58,6 +58,14 @@ func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.B return sbs.blobs.Resume(ctx, id) } +func (sbs statsBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { + sbsMu.Lock() + sbs.stats["mount"]++ + sbsMu.Unlock() + + return sbs.blobs.Mount(ctx, sourceRepo, dgst) +} + func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { sbsMu.Lock() sbs.stats["open"]++ diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index c6cfbcda..b89814c7 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -310,6 +310,154 @@ func TestSimpleBlobRead(t *testing.T) { } } +// TestBlobMount covers the blob mount process, exercising common +// error paths that might be seen during a mount. 
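Note the authorization rule added in app.go above: a mount request needs pull access to the source repository on top of the usual push access to the target, so in token terms the client ends up requesting two scopes. A self-contained sketch, assuming the conventional resource:name:actions scope format used by the client's tokenScope type (its String format is assumed here):

    package main

    import (
        "fmt"
        "strings"
    )

    // tokenScope models the client's scope representation; it renders as
    // "repository:<name>:<action1>,<action2>".
    type tokenScope struct {
        Resource string
        Scope    string
        Actions  []string
    }

    func (ts tokenScope) String() string {
        return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ","))
    }

    func main() {
        // Scopes for mounting into foo/bar from foo/source.
        fmt.Println(tokenScope{Resource: "repository", Scope: "foo/bar", Actions: []string{"pull", "push"}}.String())
        fmt.Println(tokenScope{Resource: "repository", Scope: "foo/source", Actions: []string{"pull"}}.String())
    }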
+func TestBlobMount(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName := "foo/bar" + sourceImageName := "foo/source" + driver := inmemory.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + sourceRepository, err := registry.Repository(ctx, sourceImageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + sbs := sourceRepository.Blobs(ctx) + + blobUpload, err := sbs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, randomDataReader) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // Test for existence. + statDesc, err := sbs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) + } + + if statDesc != desc { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + bs := repository.Blobs(ctx) + // Test destination for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) + } + + mountDesc, err := bs.Mount(ctx, sourceRepository.Name(), desc.Digest) + if err != nil { + t.Fatalf("unexpected error mounting layer: %v", err) + } + + if mountDesc != desc { + t.Fatalf("descriptors not equal: %v != %v", mountDesc, desc) + } + + // Test for existence. 
+ statDesc, err = bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) + } + + if statDesc != desc { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + + h := sha256.New() + nn, err = io.Copy(h, rc) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != dgst { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) + } + + // Delete the blob from the source repo + err = sbs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) + } + + d, err = sbs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + // Delete the blob from the dest repo + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } +} + // TestLayerUploadZeroLength uploads zero-length func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 430da1ca..8b7f9f51 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -20,6 +20,7 @@ type linkPathFunc func(name string, dgst digest.Digest) (string, error) // that grant access to the global blob store. type linkedBlobStore struct { *blobStore + registry *registry blobServer distribution.BlobServer blobAccessController distribution.BlobDescriptorService repository distribution.Repository @@ -185,6 +186,28 @@ func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) erro return nil } +func (lbs *linkedBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err := repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + // newBlobUpload allocates a new upload controller with the given state. 
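The deletion checks in the test above hold because blob data is global and content addressed while links are per repository: Mount only writes a new link in the destination, so deleting the source repository's link leaves the destination's link, and the bytes, untouched. A toy model of that structure, purely illustrative:

    package main

    import "fmt"

    // reg models one global blob map plus per-repository link sets.
    type reg struct {
        blobs map[string][]byte          // digest -> data, shared by all repos
        links map[string]map[string]bool // repo -> linked digests
    }

    // mount adds a link in dest when source already links the digest;
    // no blob data is copied.
    func (r *reg) mount(dest, source, dgst string) error {
        if !r.links[source][dgst] {
            return fmt.Errorf("blob unknown in %s", source)
        }
        if r.links[dest] == nil {
            r.links[dest] = map[string]bool{}
        }
        r.links[dest][dgst] = true
        return nil
    }

    func main() {
        r := &reg{
            blobs: map[string][]byte{"sha256:abc": []byte("layer")},
            links: map[string]map[string]bool{"foo/source": {"sha256:abc": true}},
        }
        fmt.Println(r.mount("foo/bar", "foo/source", "sha256:abc")) // <nil>
        delete(r.links["foo/source"], "sha256:abc")                 // delete from source
        fmt.Println(r.links["foo/bar"]["sha256:abc"])               // still true
    }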
func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { fw, err := newFileWriter(ctx, lbs.driver, path) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index b3810676..869895dd 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -233,6 +233,7 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { } return &linkedBlobStore{ + registry: repo.registry, blobStore: repo.blobStore, blobServer: repo.blobServer, blobAccessController: statter, From 44d95e58418f802d444def6e127706c23b880a1c Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 5 Jan 2016 11:13:27 -0800 Subject: [PATCH 364/501] Allows token authentication handler to request additional scopes When an auth request provides the "from" query parameter, the token handler will add a "pull" scope for the provided repository, refreshing the token if the overall scope has increased Signed-off-by: Brian Bland --- docs/client/auth/session.go | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index 9819b3cb..6b483c62 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -108,6 +108,8 @@ type tokenHandler struct { tokenLock sync.Mutex tokenCache string tokenExpiration time.Time + + additionalScopes map[string]struct{} } // tokenScope represents the scope at which a token will be requested. @@ -145,6 +147,7 @@ func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock Scope: scope, Actions: actions, }, + additionalScopes: map[string]struct{}{}, } } @@ -160,7 +163,15 @@ func (th *tokenHandler) Scheme() string { } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if err := th.refreshToken(params); err != nil { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, tokenScope{ + Resource: "repository", + Scope: fromParam, + Actions: []string{"pull"}, + }.String()) + } + if err := th.refreshToken(params, additionalScopes...); err != nil { return err } @@ -169,11 +180,18 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st return nil } -func (th *tokenHandler) refreshToken(params map[string]string) error { +func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() + var addedScopes bool + for _, scope := range additionalScopes { + if _, ok := th.additionalScopes[scope]; !ok { + th.additionalScopes[scope] = struct{}{} + addedScopes = true + } + } now := th.clock.Now() - if now.After(th.tokenExpiration) { + if now.After(th.tokenExpiration) || addedScopes { tr, err := th.fetchToken(params) if err != nil { return err @@ -223,6 +241,10 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon reqParams.Add("scope", scopeField) } + for scope := range th.additionalScopes { + reqParams.Add("scope", scope) + } + if th.creds != nil { username, password := th.creds.Basic(realmURL) if username != "" && password != "" { From 93b65847ca06b5bce74c5b7ec0b401094476c828 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 11 Jan 2016 12:52:21 -0800 Subject: [PATCH 365/501] Fix manifest API unit tests Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 8c5a1693..a1aac3cd 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1681,9 +1681,9 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { // -------------------- // Upload manifest by tag - tag := signedManifest.Tag + tag := "atag" manifestTagURL, err := env.builder.BuildManifestURL(imageName, tag) - resp = putManifest(t, "putting signed manifest by tag", manifestTagURL, signedManifest) + resp = putManifest(t, "putting signed manifest by tag", manifestTagURL, args.mediaType, manifest) checkResponse(t, "putting signed manifest by tag", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, From 36023174db108428751f21c3a115a019628d0689 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 13 Jan 2016 11:44:42 -0800 Subject: [PATCH 366/501] Adds functional options arguments to the Blobs Create method Removes the Mount operation and instead implements this behavior as part of Create: when a From option is provided, Create returns a rich ErrBlobMounted indicating that a blob upload session was not initiated, but that the blob was instead mounted from another repository Signed-off-by: Brian Bland --- docs/client/repository.go | 79 ++++++++++--------------------- docs/client/repository_test.go | 29 ++++++++++-- docs/handlers/blobupload.go | 42 +++++++++------- docs/proxy/proxyblobstore.go | 2 +- docs/proxy/proxyblobstore_test.go | 12 +---- docs/storage/blob_test.go | 21 ++++++-- docs/storage/linkedblobstore.go | 54 ++++++++++++++++++++- 7 files changed, 146 insertions(+), 93 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 8f30b4f1..c2aca03f 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -10,7 +10,6 @@ import ( "net/http" "net/url" "strconv" - "sync" "time" "github.com/docker/distribution" @@ -19,6 +18,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" ) @@ -500,9 +500,6 @@ type blobs struct { statter distribution.BlobDescriptorService distribution.BlobDeleter - - cacheLock sync.Mutex - cachedBlobUpload distribution.BlobWriter } func sanitizeLocation(location, base string) (string, error) { @@ -576,18 +573,23 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } -func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { - bs.cacheLock.Lock() - if bs.cachedBlobUpload != nil { - upload := bs.cachedBlobUpload - bs.cachedBlobUpload = nil - bs.cacheLock.Unlock() +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts storage.CreateOptions - return upload, nil + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } } - bs.cacheLock.Unlock() - u, err := bs.ub.BuildBlobUploadURL(bs.name) + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
if err != nil { return nil, err } @@ -598,7 +600,14 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) @@ -613,53 +622,15 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { startedAt: time.Now(), location: location, }, nil + default: + return nil, HandleErrorResponse(resp) } - return nil, HandleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } -func (bs *blobs) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { - u, err := bs.ub.BuildBlobUploadURL(bs.name, url.Values{"from": {sourceRepo}, "mount": {dgst.String()}}) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - return bs.Stat(ctx, dgst) - case http.StatusAccepted: - // Triggered a blob upload (legacy behavior), so cache the creation info - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return distribution.Descriptor{}, err - } - - bs.cacheLock.Lock() - bs.cachedBlobUpload = &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - } - bs.cacheLock.Unlock() - - return distribution.Descriptor{}, HandleErrorResponse(resp) - default: - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { return bs.statter.Clear(ctx, dgst) } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 8a7fc1c9..811ab235 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -18,7 +18,9 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/testutil" "github.com/docker/distribution/uuid" "github.com/docker/libtrust" @@ -471,6 +473,16 @@ func TestBlobMount(t *testing.T) { var m testutil.RequestResponseMap repo := "test.example.com/uploadrepo" sourceRepo := "test.example.com/sourcerepo" + + namedRef, err := reference.ParseNamed(sourceRepo) + if err != nil { + t.Fatal(err) + } + canonicalRef, err := reference.WithDigest(namedRef, dgst) + if err != nil { + t.Fatal(err) + } + m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", @@ -511,13 +523,20 @@ func TestBlobMount(t *testing.T) { l := r.Blobs(ctx) - stat, err := l.Mount(ctx, sourceRepo, dgst) - if err != nil { - t.Fatal(err) + bw, err := l.Create(ctx, storage.WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatalf("Expected blob writer to be 
nil, was %v", bw) } - if stat.Digest != dgst { - t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, dgst) + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if ebm.From.Digest() != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) + } + if ebm.From.Name() != sourceRepo { + t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) + } + } else { + t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) } } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index c5638c83..0f325184 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -9,8 +9,10 @@ import ( "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) @@ -118,19 +120,27 @@ type blobUploadHandler struct { // StartBlobUpload begins the blob upload process and allocates a server-side // blob writer session, optionally mounting the blob from a separate repository. func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + var options []distribution.BlobCreateOption + fromRepo := r.FormValue("from") mountDigest := r.FormValue("mount") if mountDigest != "" && fromRepo != "" { - buh.mountBlob(w, fromRepo, mountDigest) - return + opt, err := buh.createBlobMountOption(fromRepo, mountDigest) + if err != nil { + options = append(options, opt) + } } blobs := buh.Repository.Blobs(buh) - upload, err := blobs.Create(buh) + upload, err := blobs.Create(buh, options...) if err != nil { - if err == distribution.ErrUnsupported { + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } else if err == distribution.ErrUnsupported { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) } else { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) @@ -339,27 +349,23 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. // mountBlob attempts to mount a blob from another repository by its digest. If // successful, the blob is linked into the blob store and 201 Created is // returned with the canonical url of the blob. 
-func (buh *blobUploadHandler) mountBlob(w http.ResponseWriter, fromRepo, mountDigest string) { +func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { dgst, err := digest.ParseDigest(mountDigest) if err != nil { - buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) - return + return nil, err } - blobs := buh.Repository.Blobs(buh) - desc, err := blobs.Mount(buh, fromRepo, dgst) + ref, err := reference.ParseNamed(fromRepo) if err != nil { - if err == distribution.ErrBlobUnknown { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(dgst)) - } else { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return + return nil, err } - if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return + + canonical, err := reference.WithDigest(ref, dgst) + if err != nil { + return nil, err } + + return storage.WithMountFrom(canonical), nil } // writeBlobCreatedHeaders writes the standard headers describing a newly diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index ca39f9f8..41b76e8e 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -161,7 +161,7 @@ func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) return distribution.Descriptor{}, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { return nil, distribution.ErrUnsupported } diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index 5c5015a0..7702771c 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -42,12 +42,12 @@ func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, return sbs.blobs.Get(ctx, dgst) } -func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { sbsMu.Lock() sbs.stats["create"]++ sbsMu.Unlock() - return sbs.blobs.Create(ctx) + return sbs.blobs.Create(ctx, options...) 
} func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -58,14 +58,6 @@ func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.B return sbs.blobs.Resume(ctx, id) } -func (sbs statsBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { - sbsMu.Lock() - sbs.stats["mount"]++ - sbsMu.Unlock() - - return sbs.blobs.Mount(ctx, sourceRepo, dgst) -} - func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { sbsMu.Lock() sbs.stats["open"]++ diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index b89814c7..e1eacc00 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -377,13 +378,27 @@ func TestBlobMount(t *testing.T) { t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) } - mountDesc, err := bs.Mount(ctx, sourceRepository.Name(), desc.Digest) + namedRef, err := reference.ParseNamed(sourceRepository.Name()) if err != nil { + t.Fatal(err) + } + canonicalRef, err := reference.WithDigest(namedRef, desc.Digest) + if err != nil { + t.Fatal(err) + } + + bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatal("unexpected blobwriter returned from Create call, should mount instead") + } + + ebm, ok := err.(distribution.ErrBlobMounted) + if !ok { t.Fatalf("unexpected error mounting layer: %v", err) } - if mountDesc != desc { - t.Fatalf("descriptors not equal: %v != %v", mountDesc, desc) + if ebm.Descriptor != desc { + t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) } // Test for existence. diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 8b7f9f51..d7a9fd13 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -1,12 +1,14 @@ package storage import ( + "fmt" "net/http" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" ) @@ -95,10 +97,58 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) return desc, lbs.linkBlob(ctx, desc) } +// CreateOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type CreateOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. 
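With the option below in place, a successful mount is reported through the error return rather than a writer. An illustrative calling pattern, assuming the post-patch interfaces (blobs, ctx and canonicalRef come from the caller):

    import (
        "github.com/docker/distribution"
        "github.com/docker/distribution/context"
        "github.com/docker/distribution/reference"
        "github.com/docker/distribution/registry/storage"
    )

    // mountOrUpload either reuses a mounted blob or falls back to a
    // normal upload session.
    func mountOrUpload(ctx context.Context, blobs distribution.BlobStore, canonicalRef reference.Canonical) (distribution.BlobWriter, *distribution.Descriptor, error) {
        bw, err := blobs.Create(ctx, storage.WithMountFrom(canonicalRef))
        if ebm, ok := err.(distribution.ErrBlobMounted); ok {
            return nil, &ebm.Descriptor, nil // already linked; nothing to upload
        }
        if err != nil {
            return nil, nil, err
        }
        return bw, nil, nil // proceed with a regular upload
    }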
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + // Writer begins a blob write session, returning a handle. -func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + var opts CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From.Name(), opts.Mount.From.Digest()) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + uuid := uuid.Generate().String() startedAt := time.Now().UTC() @@ -186,7 +236,7 @@ func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) erro return nil } -func (lbs *linkedBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { repo, err := lbs.registry.Repository(ctx, sourceRepo) if err != nil { return distribution.Descriptor{}, err From 5d35fa34c151571b29d42fdfb266da997ebde6f8 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 6 Jan 2016 14:46:25 -0800 Subject: [PATCH 367/501] Change the parameters to the GCS drivers to allow CircleCI testing. Remove the requirement of file system access to run GCS unit tests. Deconstruct the input parameters to take the private key and email which can be specified on the build system via environment variables. 
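A condensed sketch of the convention this enables, mirroring the test changes below: GOOGLE_APPLICATION_CREDENTIALS may hold either a path to a JSON key file or the key JSON itself, so a CI system can inject credentials as a plain environment variable. This assumes the golang.org/x/oauth2/google and google.golang.org/cloud/storage packages the driver already imports:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"

        "golang.org/x/oauth2/google"
        "google.golang.org/cloud/storage"
    )

    func main() {
        credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
        if _, err := os.Stat(credentials); err == nil {
            // The variable names a readable file; load the key from it.
            jsonKey, err := ioutil.ReadFile(credentials)
            if err != nil {
                panic(err)
            }
            credentials = string(jsonKey)
        }
        // Otherwise treat the variable's value as the JSON key itself.
        jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl)
        if err != nil {
            panic(err)
        }
        fmt.Println(jwtConfig.Email)
    }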
Signed-off-by: Richard Scothern --- docs/storage/driver/gcs/gcs.go | 77 ++++++++++++++--------------- docs/storage/driver/gcs/gcs_test.go | 55 ++++++++++++++------- 2 files changed, 75 insertions(+), 57 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index d61d88b8..765d5492 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -32,7 +32,7 @@ import ( "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" - + "golang.org/x/oauth2/jwt" "google.golang.org/api/googleapi" storageapi "google.golang.org/api/storage/v1" "google.golang.org/cloud" @@ -47,10 +47,13 @@ import ( const driverName = "gcs" const dummyProjectID = "" -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set type driverParameters struct { bucket string - keyfile string + config *jwt.Config + email string + privateKey []byte + client *http.Client rootDirectory string } @@ -80,25 +83,43 @@ type driver struct { // Required parameters: // - bucket func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - bucket, ok := parameters["bucket"] if !ok || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } - keyfile, ok := parameters["keyfile"] - if !ok { - keyfile = "" - } - rootDirectory, ok := parameters["rootdirectory"] if !ok { rootDirectory = "" } + + var ts oauth2.TokenSource + jwtConf := new(jwt.Config) + if keyfile, ok := parameters["keyfile"]; ok { + jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) + if err != nil { + return nil, err + } + jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) + if err != nil { + return nil, err + } + ts = jwtConf.TokenSource(context.Background()) + } else { + var err error + ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) + if err != nil { + return nil, err + } + + } + params := driverParameters{ - fmt.Sprint(bucket), - fmt.Sprint(keyfile), - fmt.Sprint(rootDirectory), + bucket: fmt.Sprint(bucket), + rootDirectory: fmt.Sprint(rootDirectory), + email: jwtConf.Email, + privateKey: jwtConf.PrivateKey, + client: oauth2.NewClient(context.Background(), ts), } return New(params) @@ -106,8 +127,6 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri // New constructs a new driver func New(params driverParameters) (storagedriver.StorageDriver, error) { - var ts oauth2.TokenSource - var err error rootDirectory := strings.Trim(params.rootDirectory, "/") if rootDirectory != "" { rootDirectory += "/" @@ -115,33 +134,11 @@ func New(params driverParameters) (storagedriver.StorageDriver, error) { d := &driver{ bucket: params.bucket, rootDirectory: rootDirectory, + email: params.email, + privateKey: params.privateKey, + client: params.client, } - if params.keyfile == "" { - ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) - if err != nil { - return nil, err - } - } else { - jsonKey, err := ioutil.ReadFile(params.keyfile) - if err != nil { - return nil, err - } - conf, err := google.JWTConfigFromJSON( - jsonKey, - storage.ScopeFullControl, - ) - if err != nil { - return nil, err - } - ts = conf.TokenSource(context.Background()) - d.email = conf.Email - d.privateKey = conf.PrivateKey - } - client := oauth2.NewClient(context.Background(), ts) - d.client = client - if 
err != nil { - return nil, err - } + return &base.Base{ StorageDriver: d, }, nil diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go index 4062b2a3..60f3e957 100644 --- a/docs/storage/driver/gcs/gcs_test.go +++ b/docs/storage/driver/gcs/gcs_test.go @@ -3,17 +3,18 @@ package gcs import ( - "fmt" "io/ioutil" "os" "testing" - "google.golang.org/api/googleapi" - + "fmt" ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/cloud/storage" "gopkg.in/check.v1" ) @@ -25,34 +26,54 @@ var skipGCS func() string func init() { bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") - keyfile := os.Getenv("REGISTRY_STORAGE_GCS_KEYFILE") credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + // Skip GCS storage driver tests if environment variable parameters are not provided + skipGCS = func() string { + if bucket == "" || credentials == "" { + return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, REGISTRY_STORAGE_GCS_CREDS" + } + return "" + } + + if skipGCS() != "" { + return + } + root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(root) - gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { + _, err = os.Stat(credentials) + if err == nil { + jsonKey, err := ioutil.ReadFile(credentials) + if err != nil { + panic(fmt.Sprintf("Unable to read credentials from file : %s", err)) + } + credentials = string(jsonKey) + } + // Assume that the file contents are within the environment variable since it exists + // but does not contain a valid file path + jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + if err != nil { + panic(fmt.Sprintf("Error reading JWT config : %s", err)) + } + + gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { parameters := driverParameters{ - bucket, - keyfile, - rootDirectory, + bucket: bucket, + rootDirectory: root, + email: jwtConfig.Email, + privateKey: []byte(jwtConfig.PrivateKey), + client: oauth2.NewClient(ctx.Background(), jwtConfig.TokenSource(ctx.Background())), } return New(parameters) } - // Skip GCS storage driver tests if environment variable parameters are not provided - skipGCS = func() string { - if bucket == "" || (credentials == "" && keyfile == "") { - return "Must set REGISTRY_STORAGE_GCS_BUCKET and (GOOGLE_APPLICATION_CREDENTIALS or REGISTRY_STORAGE_GCS_KEYFILE) to run GCS tests" - } - return "" - } - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return gcsDriverConstructor(root) }, skipGCS) From e0d4a45c93cfcee3b1b29636d18e9bb3d4bfff34 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 13 Jan 2016 19:20:02 -0800 Subject: [PATCH 368/501] Fixes cross-repo blob mounting in the BlobUploadHandler Accidentally checked for err != nil instead of err == nil :/ Also now ensures that only a non-nil option is appended to the create options slice Signed-off-by: Brian Bland --- docs/handlers/blobupload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 0f325184..1e3bff95 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -127,7 +127,7 @@ func (buh *blobUploadHandler) 
StartBlobUpload(w http.ResponseWriter, r *http.Req if mountDigest != "" && fromRepo != "" { opt, err := buh.createBlobMountOption(fromRepo, mountDigest) - if err != nil { + if opt != nil && err == nil { options = append(options, opt) } } From 67aef89bc082ef4a3652fc96017318b4d215cf36 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 14 Jan 2016 10:08:52 -0800 Subject: [PATCH 369/501] Splits up blob create options definitions to be package-specific Redefines privately in both storage and client packages Signed-off-by: Brian Bland --- docs/client/repository.go | 34 +++++++++++++++++++++++++++++++-- docs/client/repository_test.go | 3 +-- docs/storage/linkedblobstore.go | 8 ++++---- 3 files changed, 37 insertions(+), 8 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index c2aca03f..d6521211 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -18,7 +18,6 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" ) @@ -573,8 +572,39 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. 
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts storage.CreateOptions + var opts createOptions for _, option := range options { err := option.Apply(&opts) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 811ab235..bdd7ea20 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -20,7 +20,6 @@ import ( "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/testutil" "github.com/docker/distribution/uuid" "github.com/docker/libtrust" @@ -523,7 +522,7 @@ func TestBlobMount(t *testing.T) { l := r.Blobs(ctx) - bw, err := l.Create(ctx, storage.WithMountFrom(canonicalRef)) + bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) if bw != nil { t.Fatalf("Expected blob writer to be nil, was %v", bw) } diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index d7a9fd13..a1f8724d 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -97,9 +97,9 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) return desc, lbs.linkBlob(ctx, desc) } -// CreateOptions is a collection of blob creation modifiers relevant to general +// createOptions is a collection of blob creation modifiers relevant to general // blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { +type createOptions struct { Mount struct { ShouldMount bool From reference.Canonical @@ -116,7 +116,7 @@ func (f optionFunc) Apply(v interface{}) error { // mounted from the given canonical reference. func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { return optionFunc(func(v interface{}) error { - opts, ok := v.(*CreateOptions) + opts, ok := v.(*createOptions) if !ok { return fmt.Errorf("unexpected options type: %T", v) } @@ -132,7 +132,7 @@ func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") - var opts CreateOptions + var opts createOptions for _, option := range options { err := option.Apply(&opts) From d3d9282a30472edf218d2d40828c332c27da09c3 Mon Sep 17 00:00:00 2001 From: yuzou Date: Fri, 15 Jan 2016 17:22:43 +0800 Subject: [PATCH 370/501] In testsuites.go, enlarge the size of randomBytes to 128M to fix the crash of running TestConcurrentStreamReads Signed-off-by: yuzou --- docs/storage/driver/testsuites/testsuites.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 70300309..6fea2def 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -1144,7 +1144,7 @@ func randomFilename(length int64) string { // randomBytes pre-allocates all of the memory sizes needed for the test. 
If // anything panics while accessing randomBytes, just make this number bigger. -var randomBytes = make([]byte, 96<<20) +var randomBytes = make([]byte, 128<<20) func init() { // increase the random bytes to the required maximum From 985c0d602fbc652c1152090cacd0edb173edc554 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Fri, 15 Jan 2016 11:47:26 +0000 Subject: [PATCH 371/501] StorageDriver GCS: try google.DefaultTokenSource first Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs_test.go | 40 +++++++++++++++++------------ 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go index 60f3e957..31494bde 100644 --- a/docs/storage/driver/gcs/gcs_test.go +++ b/docs/storage/driver/gcs/gcs_test.go @@ -31,7 +31,7 @@ func init() { // Skip GCS storage driver tests if environment variable parameters are not provided skipGCS = func() string { if bucket == "" || credentials == "" { - return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, REGISTRY_STORAGE_GCS_CREDS" + return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" } return "" } @@ -45,30 +45,36 @@ func init() { panic(err) } defer os.Remove(root) + var ts oauth2.TokenSource + var email string + var privateKey []byte - _, err = os.Stat(credentials) - if err == nil { - jsonKey, err := ioutil.ReadFile(credentials) - if err != nil { - panic(fmt.Sprintf("Unable to read credentials from file : %s", err)) - } - credentials = string(jsonKey) - } - - // Assume that the file contents are within the environment variable since it exists - // but does not contain a valid file path - jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) if err != nil { - panic(fmt.Sprintf("Error reading JWT config : %s", err)) + // Assume that the file contents are within the environment variable since it exists + // but does not contain a valid file path + jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + if err != nil { + panic(fmt.Sprintf("Error reading JWT config : %s", err)) + } + email = jwtConfig.Email + privateKey = []byte(jwtConfig.PrivateKey) + if len(privateKey) == 0 { + panic("Error reading JWT config : missing private_key property") + } + if email == "" { + panic("Error reading JWT config : missing client_email property") + } + ts = jwtConfig.TokenSource(ctx.Background()) } gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { parameters := driverParameters{ bucket: bucket, rootDirectory: root, - email: jwtConfig.Email, - privateKey: []byte(jwtConfig.PrivateKey), - client: oauth2.NewClient(ctx.Background(), jwtConfig.TokenSource(ctx.Background())), + email: email, + privateKey: privateKey, + client: oauth2.NewClient(ctx.Background(), ts), } return New(parameters) From 3da0ee00d87932eb7d32d8c61b8e0e2631a1909d Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 18 Jan 2016 09:59:50 -0800 Subject: [PATCH 372/501] Do not require "charset=utf-8" for a schema1 with content type application/json For compatibility with other registries that don't use this exact variant of the Content-Type header, we need to be more flexible about what we accept. Any form of "application/json" should be allowed. The charset should not be included in the comparison. 
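One way to express that comparison, sketched with the standard mime package (an illustration of the policy, not necessarily the exact helper the manifest handler uses):

    import "mime"

    // isJSON reports whether contentType is some variant of
    // "application/json", ignoring parameters such as charset.
    func isJSON(contentType string) bool {
        base, _, err := mime.ParseMediaType(contentType)
        return err == nil && base == "application/json"
    }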
See docker/docker#19400. Signed-off-by: Aaron Lehmann --- docs/handlers/api_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index a1aac3cd..f3f5a4fb 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -954,7 +954,14 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manife } - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "", sm2) + // Re-push with a few different Content-Types. The official schema1 + // content type should work, as should application/json with/without a + // charset. + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeManifest, sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp, err = http.Get(manifestDigestURL) From f9a3f028b513be193d17bb6887c42c7a1d61376c Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 18 Jan 2016 10:26:45 -0800 Subject: [PATCH 373/501] Fix content type for schema1 signed manifests The Payload function for schema1 currently returns a signed manifest, but indicates the content type is that of a manifest that isn't signed. Note that this breaks compatibility with Registry 2.3 alpha 1 and Docker 1.10-rc1, because they use the incorrect content type. Signed-off-by: Aaron Lehmann --- docs/client/repository_test.go | 8 ++++---- docs/handlers/api_test.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index bdd7ea20..8eedc4c2 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -592,7 +592,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeManifest}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), } } else { @@ -602,7 +602,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeManifest}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), } @@ -622,7 +622,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeManifest}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), }, }) @@ -636,7 +636,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeManifest}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), }, }) diff --git a/docs/handlers/api_test.go 
b/docs/handlers/api_test.go index f3f5a4fb..206a461e 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -957,7 +957,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manife // Re-push with a few different Content-Types. The official schema1 // content type should work, as should application/json with/without a // charset. - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeManifest, sm2) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) @@ -1486,7 +1486,7 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) t.Fatalf("Error constructing request: %s", err) } req.Header.Set("Accept", manifestlist.MediaTypeManifestList) - req.Header.Add("Accept", schema1.MediaTypeManifest) + req.Header.Add("Accept", schema1.MediaTypeSignedManifest) req.Header.Add("Accept", schema2.MediaTypeManifest) resp, err = http.DefaultClient.Do(req) if err != nil { From ffc9527782299ccf1d2a6b30e8c793e7a2b46652 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Tue, 19 Jan 2016 14:09:32 +0000 Subject: [PATCH 374/501] StorageDriver: Test suite: improve cleanup Verify that the file(s) have been deleted after calling Delete, and retry if this is not the case. Furthermore, report the error if a Delete operation fails. Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 11 +++- docs/storage/driver/gcs/gcs_test.go | 8 ++- docs/storage/driver/testsuites/testsuites.go | 64 +++++++++++++------- 3 files changed, 57 insertions(+), 26 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 765d5492..dd4573b8 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -555,7 +555,16 @@ func (d *driver) Delete(context ctx.Context, path string) error { if len(keys) > 0 { sort.Sort(sort.Reverse(sort.StringSlice(keys))) for _, key := range keys { - if err := storage.DeleteObject(gcsContext, d.bucket, key); err != nil { + err := storage.DeleteObject(gcsContext, d.bucket, key) + // GCS only guarantees eventual consistency, solistAll might return + // paths that no longer exist. 
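Pulled together, the tolerant delete in this hunk amounts to the following sketch (the helper name is hypothetical; storage and googleapi are the same google.golang.org packages the diff imports, and net/http supplies StatusNotFound):

    // deleteIgnoringNotFound treats a 404 from GCS as success: under
    // eventual consistency, listAll may hand back keys that are
    // already gone by the time the delete is issued.
    func deleteIgnoringNotFound(ctx context.Context, bucket, key string) error {
        err := storage.DeleteObject(ctx, bucket, key)
        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
            return nil
        }
        return err
    }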
If this happens, just ignore any not + // found error + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + if err != nil { return err } } diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go index 31494bde..554d95e4 100644 --- a/docs/storage/driver/gcs/gcs_test.go +++ b/docs/storage/driver/gcs/gcs_test.go @@ -155,8 +155,12 @@ func TestEmptyRootList(t *testing.T) { if err != nil { t.Fatalf("unexpected error creating content: %v", err) } - defer rootedDriver.Delete(ctx, filename) - + defer func() { + err := rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to remove %v due to %v\n", filename, err) + } + }() keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 6fea2def..5c34cca6 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -120,7 +120,7 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) { for _, filename := range validFiles { err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(suite.ctx, filename) @@ -129,6 +129,21 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) { } } +func (suite *DriverSuite) deletePath(c *check.C, path string) { + for tries := 2; tries > 0; tries-- { + err := suite.StorageDriver.Delete(suite.ctx, path) + if _, ok := err.(storagedriver.PathNotFoundError); ok { + err = nil + } + c.Assert(err, check.IsNil) + paths, err := suite.StorageDriver.List(suite.ctx, path) + if len(paths) == 0 { + break + } + time.Sleep(time.Second * 2) + } +} + // TestInvalidPaths checks that various invalid file paths are rejected by the // storage driver. func (suite *DriverSuite) TestInvalidPaths(c *check.C) { @@ -143,7 +158,10 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) { for _, filename := range invalidFiles { err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + // only delete if file was succesfully written + if err == nil { + defer suite.deletePath(c, firstPart(filename)) + } c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) @@ -258,7 +276,7 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { } filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) checksum := sha1.New() var fileSize int64 = 5 * 1024 * 1024 * 1024 @@ -282,7 +300,7 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { // reading with a given offset. 
func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) chunkSize := int64(32) @@ -372,7 +390,7 @@ func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) contentsChunk1 := randomContents(chunkSize) contentsChunk2 := randomContents(chunkSize) @@ -470,7 +488,7 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { // TestList checks the returned list of keys after populating a directory tree. func (suite *DriverSuite) TestList(c *check.C) { rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.StorageDriver.Delete(suite.ctx, rootDirectory) + defer suite.deletePath(c, rootDirectory) doesnotexist := path.Join(rootDirectory, "nonexistent") _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) @@ -516,8 +534,8 @@ func (suite *DriverSuite) TestMove(c *check.C) { sourcePath := randomPath(32) destPath := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) c.Assert(err, check.IsNil) @@ -543,8 +561,8 @@ func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { sourceContents := randomContents(32) destContents := randomContents(64) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + defer suite.deletePath(c, firstPart(sourcePath)) + defer suite.deletePath(c, firstPart(destPath)) err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) c.Assert(err, check.IsNil) @@ -572,7 +590,7 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { sourcePath := randomPath(32) destPath := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) + defer suite.deletePath(c, firstPart(destPath)) err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) c.Assert(err, check.IsNil) @@ -594,7 +612,7 @@ func (suite *DriverSuite) TestMoveInvalid(c *check.C) { // Create a regular file. err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) c.Assert(err, check.IsNil) - defer suite.StorageDriver.Delete(suite.ctx, "/notadir") + defer suite.deletePath(c, "/notadir") // Now try to move a non-existent file under it. 
err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") @@ -607,7 +625,7 @@ func (suite *DriverSuite) TestDelete(c *check.C) { filename := randomPath(32) contents := randomContents(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) @@ -627,7 +645,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { filename := randomPath(32) contents := randomContents(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) @@ -674,7 +692,7 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) { filename3 := randomPath(32) contents := randomContents(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname)) + defer suite.deletePath(c, firstPart(dirname)) err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) c.Assert(err, check.IsNil) @@ -725,7 +743,7 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { fileName := randomFilename(32) filePath := path.Join(dirPath, fileName) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath)) + defer suite.deletePath(c, firstPart(dirPath)) // Call on non-existent file/dir, check error. fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) @@ -788,7 +806,7 @@ func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { filename := randomPath(32) contents := randomContents(4096) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) @@ -814,7 +832,7 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { filename := randomPath(32) contents := randomContents(filesize) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) @@ -872,7 +890,7 @@ func (suite *DriverSuite) TestEventualConsistency(c *check.C) { } filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) var offset int64 var misswrites int @@ -1033,7 +1051,7 @@ func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { for i := 0; i < c.N; i++ { parentDir := randomPath(8) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) + defer suite.deletePath(c, firstPart(parentDir)) c.StopTimer() for j := int64(0); j < numFiles; j++ { @@ -1055,7 +1073,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { defer tf.Close() filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) contents := randomContents(size) @@ -1080,7 +1098,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { } func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) @@ -1092,7 
+1110,7 @@ func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents } func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) + defer suite.deletePath(c, firstPart(filename)) nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) c.Assert(err, check.IsNil) From 59a9607783490dbb185f8e63f1cab15064c85d9a Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Tue, 19 Jan 2016 14:40:00 +0000 Subject: [PATCH 375/501] StorageDriver: GCS: retry all api calls Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 64 ++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 14 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index dd4573b8..0e3480f2 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -206,7 +206,7 @@ func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io. } if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { res.Body.Close() - obj, err := storage.StatObject(d.context(context), d.bucket, name) + obj, err := storageStatObject(d.context(context), d.bucket, name) if err != nil { return nil, err } @@ -287,7 +287,7 @@ func (d *driver) WriteStream(context ctx.Context, path string, offset int64, rea } // wc was closed succesfully, so the temporary part exists, schedule it for deletion at the end // of the function - defer storage.DeleteObject(gcsContext, d.bucket, partName) + defer storageDeleteObject(gcsContext, d.bucket, partName) req := &storageapi.ComposeRequest{ Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType}, @@ -386,7 +386,7 @@ func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, var fi storagedriver.FileInfoFields //try to get as file gcsContext := d.context(context) - obj, err := storage.StatObject(gcsContext, d.bucket, d.pathToKey(path)) + obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) if err == nil { fi = storagedriver.FileInfoFields{ Path: path, @@ -404,7 +404,7 @@ func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, query.Prefix = dirpath query.MaxResults = 1 - objects, err := storage.ListObjects(gcsContext, d.bucket, query) + objects, err := storageListObjects(gcsContext, d.bucket, query) if err != nil { return nil, err } @@ -432,7 +432,7 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { query.Prefix = d.pathToDirKey(path) list := make([]string, 0, 64) for { - objects, err := storage.ListObjects(d.context(context), d.bucket, query) + objects, err := storageListObjects(d.context(context), d.bucket, query) if err != nil { return nil, err } @@ -482,7 +482,7 @@ func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) e var err error for _, key := range keys { dest := destPrefix + key[len(prefix):] - _, err = storage.CopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) + _, err = storageCopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) if err == nil { copies = append(copies, dest) } else { @@ -492,20 +492,20 @@ func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) e // if an error occurred, attempt to cleanup the copies made if err != nil { for i := len(copies) - 1; i >= 0; i-- { - _ = storage.DeleteObject(gcsContext, d.bucket, copies[i]) + _ = 
storageDeleteObject(gcsContext, d.bucket, copies[i]) } return err } // delete originals for i := len(keys) - 1; i >= 0; i-- { - err2 := storage.DeleteObject(gcsContext, d.bucket, keys[i]) + err2 := storageDeleteObject(gcsContext, d.bucket, keys[i]) if err2 != nil { err = err2 } } return err } - _, err = storage.CopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) + _, err = storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) if err != nil { if status := err.(*googleapi.Error); status != nil { if status.Code == http.StatusNotFound { @@ -514,7 +514,7 @@ func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) e } return err } - return storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) + return storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) } // listAll recursively lists all names of objects stored at "prefix" and its subpaths. @@ -524,7 +524,7 @@ func (d *driver) listAll(context context.Context, prefix string) ([]string, erro query.Prefix = prefix query.Versions = false for { - objects, err := storage.ListObjects(d.context(context), d.bucket, query) + objects, err := storageListObjects(d.context(context), d.bucket, query) if err != nil { return nil, err } @@ -555,8 +555,8 @@ func (d *driver) Delete(context ctx.Context, path string) error { if len(keys) > 0 { sort.Sort(sort.Reverse(sort.StringSlice(keys))) for _, key := range keys { - err := storage.DeleteObject(gcsContext, d.bucket, key) - // GCS only guarantees eventual consistency, solistAll might return + err := storageDeleteObject(gcsContext, d.bucket, key) + // GCS only guarantees eventual consistency, so listAll might return // paths that no longer exist. 
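These storage* wrappers, defined later in this diff, funnel every GCS API call through a retry helper that is itself defined elsewhere in the patch. A minimal sketch of the shape the wrappers assume (the doubling backoff is illustrative, not the patch's exact policy):

    import "time"

    // retry invokes req up to maxTries times, sleeping with a doubling
    // backoff between failed attempts, and returns the last error
    // (nil as soon as an attempt succeeds).
    func retry(maxTries int, req func() error) error {
        backoff := 100 * time.Millisecond
        var err error
        for i := 0; i < maxTries; i++ {
            if err = req(); err == nil {
                return nil
            }
            time.Sleep(backoff)
            backoff *= 2
        }
        return err
    }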
If this happens, just ignore any not // found error if status, ok := err.(*googleapi.Error); ok { @@ -570,7 +570,7 @@ func (d *driver) Delete(context ctx.Context, path string) error { } return nil } - err = storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) if err != nil { if status := err.(*googleapi.Error); status != nil { if status.Code == http.StatusNotFound { @@ -581,6 +581,42 @@ func (d *driver) Delete(context ctx.Context, path string) error { return err } +func storageDeleteObject(context context.Context, bucket string, name string) error { + return retry(5, func() error { + return storage.DeleteObject(context, bucket, name) + }) +} + +func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { + var obj *storage.Object + err := retry(5, func() error { + var err error + obj, err = storage.StatObject(context, bucket, name) + return err + }) + return obj, err +} + +func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { + var objs *storage.Objects + err := retry(5, func() error { + var err error + objs, err = storage.ListObjects(context, bucket, q) + return err + }) + return objs, err +} + +func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { + var obj *storage.Object + err := retry(5, func() error { + var err error + obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) + return err + }) + return obj, err +} + // URLFor returns a URL which may be used to retrieve the content stored at // the given path, possibly using the given options. // Returns ErrUnsupportedMethod if this driver has no privateKey From 59254013beefb037d060490a249cef9ce96261f8 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 20 Jan 2016 14:45:08 -0800 Subject: [PATCH 376/501] Handle nonstandard token endpoint errors https://github.com/docker/distribution/pull/1249 changed token fetching to parse HTTP error response bodies as serialized errcodes. However, Docker Hub's authentication endpoint does not return error bodies in this format. To work around this, convert its format into ErrCodeUnauthorized or ErrCodeUnknown. Signed-off-by: Aaron Lehmann --- docs/api/errcode/errors.go | 9 +++++++++ docs/client/errors.go | 19 ++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/docs/api/errcode/errors.go b/docs/api/errcode/errors.go index 9a405d21..6d9bb4b6 100644 --- a/docs/api/errcode/errors.go +++ b/docs/api/errcode/errors.go @@ -69,6 +69,15 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { return nil } +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. 
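WithMessage complements the existing WithDetail constructor. A hedged example of what the new token-endpoint handling (in the errors.go hunk below) produces when Docker Hub answers a 401 with a body like {"details": "incorrect username or password"}:

    err := errcode.ErrorCodeUnauthorized.WithMessage("incorrect username or password")
    // err.ErrorCode() == errcode.ErrorCodeUnauthorized
    // err.Message    == "incorrect username or password"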
+func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { diff --git a/docs/client/errors.go b/docs/client/errors.go index 8e3cb108..a528a865 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -31,13 +31,26 @@ func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } -func parseHTTPErrorResponse(r io.Reader) error { +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. + var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + if statusCode == http.StatusUnauthorized { + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + } + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ ParseErr: err, @@ -53,14 +66,14 @@ func parseHTTPErrorResponse(r io.Reader) error { // range. func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.Body) + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.Body) + return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } From e9bcc96ad27c3e583c6a417ddea6d204765e1ef1 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 21 Jan 2016 09:34:06 -0800 Subject: [PATCH 377/501] If the media type for a manifest is unrecognized, default to schema1 This is needed for compatibility with some third-party registries that send an inappropriate Content-Type header such as text/html. 
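The fallback the new "badcontenttype" case exercises can be sketched as follows (illustrative only; the real dispatch lives in the client's manifest unmarshaling path, outside this test-only diff):

    // manifestMediaType picks the media type to unmarshal with,
    // defaulting to signed schema1 when the Content-Type is not a
    // recognized manifest type (e.g. "text/html").
    func manifestMediaType(contentType string) string {
        switch contentType {
        case schema1.MediaTypeSignedManifest,
            schema2.MediaTypeManifest,
            manifestlist.MediaTypeManifestList:
            return contentType
        }
        return schema1.MediaTypeSignedManifest
    }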
Signed-off-by: Aaron Lehmann --- docs/client/repository_test.go | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 8eedc4c2..69987c87 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -610,7 +610,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) } -func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { +func addTestManifest(repo, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", @@ -622,7 +622,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeSignedManifest}, + "Content-Type": {mediatype}, }), }, }) @@ -636,7 +636,7 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - "Content-Type": {schema1.MediaTypeSignedManifest}, + "Content-Type": {mediatype}, }), }, }) @@ -678,8 +678,9 @@ func TestV1ManifestFetch(t *testing.T) { if err != nil { t.Fatal(err) } - addTestManifest(repo, dgst.String(), pl, &m) - addTestManifest(repo, "latest", pl, &m) + addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "badcontenttype", "text/html", pl, &m) e, c := testServer(m) defer c() @@ -726,6 +727,19 @@ func TestV1ManifestFetch(t *testing.T) { if err = checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } + + manifest, err = ms.Get(ctx, dgst, WithTag("badcontenttype")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } } func TestManifestFetchWithEtag(t *testing.T) { From e9692b8037d032d3dfd6cd5c2f9737aa22884e57 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 15 Dec 2015 14:35:23 -0800 Subject: [PATCH 378/501] Use reference package internally Most places in the registry were using string types to refer to repository names. This changes them to use reference.Named, so the type system can enforce validation of the naming rules. 
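The practical effect: an invalid name now fails once, at parse time, and every API below accepts only a validated reference.Named. A small sketch using the URL builder from this diff (urlBuilder is a *v2.URLBuilder):

    ref, err := reference.ParseNamed("test.example.com/repo1")
    if err != nil {
        // Malformed names (uppercase letters, bad separators, ...)
        // are rejected here, before any registry code runs.
        log.Fatal(err)
    }
    tagsURL, err := urlBuilder.BuildTagsURL(ref) // a raw string no longer compiles
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tagsURL) // e.g. ".../v2/test.example.com/repo1/tags/list"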
Signed-off-by: Aaron Lehmann --- docs/api/v2/urls.go | 21 +++--- docs/api/v2/urls_test.go | 17 +++-- docs/client/repository.go | 24 +++--- docs/client/repository_test.go | 101 ++++++++++++-------------- docs/handlers/api_test.go | 91 +++++++++++++---------- docs/handlers/app.go | 15 +++- docs/handlers/app_test.go | 2 +- docs/handlers/blobupload.go | 4 +- docs/handlers/images.go | 13 +++- docs/handlers/tags.go | 4 +- docs/proxy/proxyblobstore.go | 5 +- docs/proxy/proxyblobstore_test.go | 10 ++- docs/proxy/proxymanifeststore.go | 5 +- docs/proxy/proxymanifeststore_test.go | 10 ++- docs/proxy/proxyregistry.go | 9 ++- docs/proxy/scheduler/scheduler.go | 10 ++- docs/storage/blob_test.go | 16 ++-- docs/storage/blobwriter.go | 2 +- docs/storage/blobwriter_resumable.go | 4 +- docs/storage/linkedblobstore.go | 18 ++--- docs/storage/manifeststore.go | 2 +- docs/storage/manifeststore_test.go | 10 ++- docs/storage/registry.go | 15 +--- docs/storage/signaturestore.go | 2 +- docs/storage/tagstore.go | 14 ++-- docs/storage/tagstore_test.go | 4 +- 26 files changed, 235 insertions(+), 193 deletions(-) diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index 6ba39cc9..5b63ccaa 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/gorilla/mux" ) @@ -113,10 +114,10 @@ func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { } // BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { +func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { route := ub.cloneRoute(RouteNameTags) - tagsURL, err := route.URL("name", name) + tagsURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -126,10 +127,10 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { // BuildManifestURL constructs a url for the manifest identified by name and // reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { +func (ub *URLBuilder) BuildManifestURL(name reference.Named, reference string) (string, error) { route := ub.cloneRoute(RouteNameManifest) - manifestURL, err := route.URL("name", name, "reference", reference) + manifestURL, err := route.URL("name", name.Name(), "reference", reference) if err != nil { return "", err } @@ -138,10 +139,10 @@ func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { } // BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { +func (ub *URLBuilder) BuildBlobURL(name reference.Named, dgst digest.Digest) (string, error) { route := ub.cloneRoute(RouteNameBlob) - layerURL, err := route.URL("name", name, "digest", dgst.String()) + layerURL, err := route.URL("name", name.Name(), "digest", dgst.String()) if err != nil { return "", err } @@ -151,10 +152,10 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. 
-func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) - uploadURL, err := route.URL("name", name) + uploadURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -166,10 +167,10 @@ func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (str // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) - uploadURL, err := route.URL("name", name, "uuid", uuid) + uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) if err != nil { return "", err } diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index 0ad33add..7dab00fc 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -4,6 +4,8 @@ import ( "net/http" "net/url" "testing" + + "github.com/docker/distribution/reference" ) type urlBuilderTestCase struct { @@ -13,6 +15,7 @@ type urlBuilderTestCase struct { } func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + fooBarRef, _ := reference.ParseNamed("foo/bar") return []urlBuilderTestCase{ { description: "test base url", @@ -23,35 +26,35 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", build: func() (string, error) { - return urlBuilder.BuildTagsURL("foo/bar") + return urlBuilder.BuildTagsURL(fooBarRef) }, }, { description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", build: func() (string, error) { - return urlBuilder.BuildManifestURL("foo/bar", "tag") + return urlBuilder.BuildManifestURL(fooBarRef, "tag") }, }, { description: "build blob url", expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", build: func() (string, error) { - return urlBuilder.BuildBlobURL("foo/bar", "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") }, }, { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar") + return urlBuilder.BuildBlobUploadURL(fooBarRef) }, }, { description: "build blob upload url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ "size": []string{"10000"}, "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) @@ -61,14 +64,14 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + return 
urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") }, }, { description: "build blob upload chunk url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ "size": []string{"10000"}, "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) diff --git a/docs/client/repository.go b/docs/client/repository.go index d6521211..43826907 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -98,11 +98,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri } // NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if _, err := reference.ParseNamed(name); err != nil { - return nil, err - } - +func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err @@ -125,21 +121,21 @@ type repository struct { client *http.Client ub *v2.URLBuilder context context.Context - name string + name reference.Named } -func (r *repository) Name() string { +func (r *repository) Name() reference.Named { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, } return &blobs{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), @@ -149,7 +145,7 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, etags: make(map[string]string), @@ -170,7 +166,7 @@ type tags struct { client *http.Client ub *v2.URLBuilder context context.Context - name string + name reference.Named } // All returns all tags @@ -293,7 +289,7 @@ func (t *tags) Untag(ctx context.Context, tag string) error { } type manifests struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client etags map[string]string @@ -493,7 +489,7 @@ func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { }*/ type blobs struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client @@ -666,7 +662,7 @@ func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { } type blobStatter struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 69987c87..b7b782c7 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -98,11 +98,11 @@ func addTestCatalog(route string, content []byte, link string, m *testutil.Reque func TestBlobDelete(t *testing.T) { dgst, _ := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/repo1" + 
repo, _ := reference.ParseNamed("test.example.com/repo1") m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -137,7 +137,8 @@ func TestBlobFetch(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } @@ -157,12 +158,12 @@ func TestBlobFetch(t *testing.T) { func TestBlobExistsNoContentLength(t *testing.T) { var m testutil.RequestResponseMap - repo := "biff" + repo, _ := reference.ParseNamed("biff") dgst, content := newRandomBlob(1024) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -177,7 +178,7 @@ func TestBlobExistsNoContentLength(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -216,7 +217,8 @@ func TestBlobExists(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } @@ -247,18 +249,18 @@ func TestBlobUploadChunked(t *testing.T) { b1[512:513], b1[513:1024], } - repo := "test.example.com/uploadrepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uuids := []string{uuid.Generate().String()} m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[0]}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, "Docker-Upload-UUID": {uuids[0]}, "Range": {"0-0"}, }), @@ -271,14 +273,14 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", - Route: "/v2/" + repo + "/blobs/uploads/" + uuids[i], + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], Body: chunk, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, "Docker-Upload-UUID": {uuids[i+1]}, "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, }), @@ -289,7 +291,7 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1], + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], QueryParams: map[string][]string{ "digest": {dgst.String()}, }, @@ -306,7 +308,7 @@ func TestBlobUploadChunked(t *testing.T) { 
m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -362,18 +364,18 @@ func TestBlobUploadChunked(t *testing.T) { func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/uploadrepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uploadID := uuid.Generate().String() m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Range": {"0-0"}, }), @@ -382,13 +384,13 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", - Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, Body: b1, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ - "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, @@ -399,7 +401,7 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, QueryParams: map[string][]string{ "digest": {dgst.String()}, }, @@ -416,7 +418,7 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -470,29 +472,22 @@ func TestBlobUploadMonolithic(t *testing.T) { func TestBlobMount(t *testing.T) { dgst, content := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/uploadrepo" - sourceRepo := "test.example.com/sourcerepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") - namedRef, err := reference.ParseNamed(sourceRepo) - if err != nil { - t.Fatal(err) - } - canonicalRef, err := reference.WithDigest(namedRef, dgst) - if err != nil { - t.Fatal(err) - } + sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") + canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", - QueryParams: map[string][]string{"from": {sourceRepo}, "mount": {dgst.String()}}, + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, }, Response: testutil.Response{ StatusCode: http.StatusCreated, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/" + 
dgst.String()}, + "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, "Docker-Content-Digest": {dgst.String()}, }), }, @@ -500,7 +495,7 @@ func TestBlobMount(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -531,7 +526,7 @@ func TestBlobMount(t *testing.T) { if ebm.From.Digest() != dgst { t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) } - if ebm.From.Name() != sourceRepo { + if ebm.From.Name() != sourceRepo.Name() { t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) } } else { @@ -539,7 +534,7 @@ func TestBlobMount(t *testing.T) { } } -func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { +func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { blobs := make([]schema1.FSLayer, blobCount) history := make([]schema1.History, blobCount) @@ -551,7 +546,7 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed } m := schema1.Manifest{ - Name: name, + Name: name.String(), Tag: tag, Architecture: "x86", FSLayers: blobs, @@ -574,11 +569,11 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed return sm, digest.FromBytes(sm.Canonical), sm.Canonical } -func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { +func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { actualDigest := digest.FromBytes(content) getReqWithEtag := testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, Headers: http.Header(map[string][]string{ "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, }), @@ -610,11 +605,11 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) } -func addTestManifest(repo, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { +func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -629,7 +624,7 @@ func addTestManifest(repo, reference string, mediatype string, content []byte, m *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -671,7 +666,7 @@ func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { func TestV1ManifestFetch(t *testing.T) { ctx := context.Background() - repo := "test.example.com/repo" + repo, _ := reference.ParseNamed("test.example.com/repo") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap _, pl, err := m1.Payload() @@ -743,7 +738,7 @@ func 
TestV1ManifestFetch(t *testing.T) { } func TestManifestFetchWithEtag(t *testing.T) { - repo := "test.example.com/repo/by/tag" + repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) @@ -773,14 +768,14 @@ func TestManifestFetchWithEtag(t *testing.T) { } func TestManifestDelete(t *testing.T) { - repo := "test.example.com/repo/delete" + repo, _ := reference.ParseNamed("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", - Route: "/v2/" + repo + "/manifests/" + dgst1.String(), + Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -813,7 +808,7 @@ func TestManifestDelete(t *testing.T) { } func TestManifestPut(t *testing.T) { - repo := "test.example.com/repo/delete" + repo, _ := reference.ParseNamed("test.example.com/repo/delete") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) _, payload, err := m1.Payload() @@ -824,7 +819,7 @@ func TestManifestPut(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/manifests/other", + Route: "/v2/" + repo.Name() + "/manifests/other", Body: payload, }, Response: testutil.Response{ @@ -857,7 +852,7 @@ func TestManifestPut(t *testing.T) { } func TestManifestTags(t *testing.T) { - repo := "test.example.com/repo/tags/list" + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") tagsList := []byte(strings.TrimSpace(` { "name": "test.example.com/repo/tags/list", @@ -873,7 +868,7 @@ func TestManifestTags(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/tags/list", + Route: "/v2/" + repo.Name() + "/tags/list", }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -919,14 +914,14 @@ func TestManifestTags(t *testing.T) { } func TestManifestUnauthorized(t *testing.T) { - repo := "test.example.com/repo" + repo, _ := reference.ParseNamed("test.example.com/repo") _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusUnauthorized, diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 206a461e..b59db6cc 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -26,6 +26,7 @@ import ( "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -251,7 +252,7 @@ func TestURLPrefix(t *testing.T) { } type blobArgs struct { - imageName string + imageName reference.Named layerFile io.ReadSeeker layerDigest digest.Digest } @@ -263,10 +264,10 @@ func makeBlobArgs(t *testing.T) blobArgs { } args := 
blobArgs{ - imageName: "foo/bar", layerFile: layerFile, layerDigest: layerDigest, } + args.imageName, _ = reference.ParseNamed("foo/bar") return args } @@ -609,7 +610,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { func TestDeleteDisabled(t *testing.T) { env := newTestEnv(t, false) - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { @@ -634,7 +635,7 @@ func TestDeleteDisabled(t *testing.T) { func TestDeleteReadOnly(t *testing.T) { env := newTestEnv(t, true) - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { @@ -662,7 +663,7 @@ func TestStartPushReadOnly(t *testing.T) { env := newTestEnv(t, true) env.app.readOnly = true - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) if err != nil { @@ -693,42 +694,49 @@ func httpDelete(url string) (*http.Response, error) { } type manifestArgs struct { - imageName string + imageName reference.Named mediaType string manifest distribution.Manifest dgst digest.Digest } func TestManifestAPI(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + deleteEnabled := false env := newTestEnv(t, deleteEnabled) - testManifestAPISchema1(t, env, "foo/schema1") - schema2Args := testManifestAPISchema2(t, env, "foo/schema2") + testManifestAPISchema1(t, env, schema1Repo) + schema2Args := testManifestAPISchema2(t, env, schema2Repo) testManifestAPIManifestList(t, env, schema2Args) deleteEnabled = true env = newTestEnv(t, deleteEnabled) - testManifestAPISchema1(t, env, "foo/schema1") - schema2Args = testManifestAPISchema2(t, env, "foo/schema2") + testManifestAPISchema1(t, env, schema1Repo) + schema2Args = testManifestAPISchema2(t, env, schema2Repo) testManifestAPIManifestList(t, env, schema2Args) } func TestManifestDelete(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + deleteEnabled := true env := newTestEnv(t, deleteEnabled) - schema1Args := testManifestAPISchema1(t, env, "foo/schema1") + schema1Args := testManifestAPISchema1(t, env, schema1Repo) testManifestDelete(t, env, schema1Args) - schema2Args := testManifestAPISchema2(t, env, "foo/schema2") + schema2Args := testManifestAPISchema2(t, env, schema2Repo) testManifestDelete(t, env, schema2Args) } func TestManifestDeleteDisabled(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") deleteEnabled := false env := newTestEnv(t, deleteEnabled) - testManifestDeleteDisabled(t, env, "foo/schema1") + testManifestDeleteDisabled(t, env, schema1Repo) } -func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName string) { +func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) @@ -743,7 +751,7 @@ func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName string) { checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) } -func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manifestArgs { +func testManifestAPISchema1(t *testing.T, 
env *testEnv, imageName reference.Named) manifestArgs { tag := "thetag" args := manifestArgs{imageName: imageName} @@ -784,7 +792,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manife Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: imageName, + Name: imageName.Name(), Tag: tag, FSLayers: []schema1.FSLayer{ { @@ -1032,8 +1040,8 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manife t.Fatalf("unexpected error decoding error response: %v", err) } - if tagsResponse.Name != imageName { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) } if len(tagsResponse.Tags) != 1 { @@ -1060,7 +1068,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName string) manife return args } -func testManifestAPISchema2(t *testing.T, env *testEnv, imageName string) manifestArgs { +func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { tag := "schema2tag" args := manifestArgs{ imageName: imageName, @@ -1340,7 +1348,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName string) manife t.Fatalf("unexpected error decoding error response: %v", err) } - if tagsResponse.Name != imageName { + if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) } @@ -1379,7 +1387,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName string) manife if fetchedSchema1Manifest.Architecture != "amd64" { t.Fatal("wrong architecture") } - if fetchedSchema1Manifest.Name != imageName { + if fetchedSchema1Manifest.Name != imageName.Name() { t.Fatal("wrong image name") } if fetchedSchema1Manifest.Tag != tag { @@ -1602,7 +1610,7 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) if fetchedSchema1Manifest.Architecture != "amd64" { t.Fatal("wrong architecture") } - if fetchedSchema1Manifest.Name != imageName { + if fetchedSchema1Manifest.Name != imageName.Name() { t.Fatal("wrong image name") } if fetchedSchema1Manifest.Tag != tag { @@ -1715,7 +1723,7 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { t.Fatalf("unexpected error decoding error response: %v", err) } - if tagsResponse.Name != imageName { + if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) } @@ -1749,7 +1757,7 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { t.Fatalf("unexpected error decoding error response: %v", err) } - if tagsResponse.Name != imageName { + if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) } @@ -1863,7 +1871,7 @@ func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *htt return resp } -func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location string, uuid string) { +func startPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named) (location string, uuid string) { layerUploadURL, err := ub.BuildBlobUploadURL(name) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) @@ -1875,7 +1883,7 @@ func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location stri } defer resp.Body.Close() - checkResponse(t, fmt.Sprintf("pushing starting layer push 
%v", name), resp, http.StatusAccepted) + checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name.String()), resp, http.StatusAccepted) u, err := url.Parse(resp.Header.Get("Location")) if err != nil { @@ -1894,7 +1902,7 @@ func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location stri // doPushLayer pushes the layer content returning the url on success returning // the response. If you're only expecting a successful response, use pushLayer. -func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { +func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { u, err := url.Parse(uploadURLBase) if err != nil { t.Fatalf("unexpected error parsing pushLayer url: %v", err) @@ -1918,7 +1926,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges } // pushLayer pushes the layer content returning the url on success. -func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { +func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { digester := digest.Canonical.New() resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) @@ -1949,7 +1957,7 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, return resp.Header.Get("Location") } -func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, dgst digest.Digest) string { +func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) @@ -1997,7 +2005,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp return resp, digester.Digest(), err } -func pushChunk(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { +func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { resp, dgst, err := doPushChunk(t, uploadURLBase, body) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) @@ -2133,6 +2141,11 @@ func checkErr(t *testing.T, err error, msg string) { } func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { + imageNameRef, err := reference.ParseNamed(imageName) + if err != nil { + t.Fatalf("unable to parse reference: %v", err) + } + unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, @@ -2164,8 +2177,8 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + uploadURLBase, _ := startPushLayer(t, env.builder, imageNameRef) + pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) } signedManifest, err := schema1.Sign(unsignedManifest, env.pk) @@ -2176,10 +2189,10 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) dgst := 
digest.FromBytes(signedManifest.Canonical) // Create this repository by tag to ensure the tag mapping is made in the registry - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, tag) + manifestDigestURL, err := env.builder.BuildManifestURL(imageNameRef, tag) checkErr(t, err, "building manifest url") - location, err := env.builder.BuildManifestURL(imageName, dgst.String()) + location, err := env.builder.BuildManifestURL(imageNameRef, dgst.String()) checkErr(t, err, "building location URL") resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) @@ -2197,7 +2210,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { deleteEnabled := true env := newTestEnvMirror(t, deleteEnabled) - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" manifestURL, err := env.builder.BuildManifestURL(imageName, tag) if err != nil { @@ -2209,7 +2222,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: imageName, + Name: imageName.Name(), Tag: tag, FSLayers: []schema1.FSLayer{}, History: []schema1.History{}, @@ -2284,12 +2297,12 @@ func TestProxyManifestGetByTag(t *testing.T) { } truthConfig.HTTP.Headers = headerConfig - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" truthEnv := newTestEnvWithConfig(t, &truthConfig) // create a repository in the truth registry - dgst := createRepository(truthEnv, t, imageName, tag) + dgst := createRepository(truthEnv, t, imageName.Name(), tag) proxyConfig := configuration.Configuration{ Storage: configuration.Storage{ @@ -2322,7 +2335,7 @@ func TestProxyManifestGetByTag(t *testing.T) { }) // Create another manifest in the remote with the same image/tag pair - newDigest := createRepository(truthEnv, t, imageName, tag) + newDigest := createRepository(truthEnv, t, imageName.Name(), tag) if dgst == newDigest { t.Fatalf("non-random test data") } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 23225493..70b7417f 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution/health" "github.com/docker/distribution/health/checks" "github.com/docker/distribution/notifications" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" @@ -590,7 +591,19 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) if app.nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) + nameRef, err := reference.ParseNamed(getName(context)) + if err != nil { + ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) + context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ + Name: getName(context), + Reason: err, + }) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + repository, err := app.registry.Repository(context, nameRef) if err != nil { ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index de27f443..907ae53a 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -48,7 +48,7 @@ func 
TestAppDispatcher(t *testing.T) { varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { return func(ctx *Context, r *http.Request) http.Handler { // Always checks the same name context - if ctx.Repository.Name() != getName(ctx) { + if ctx.Repository.Name().Name() != getName(ctx) { t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 1e3bff95..56403dd9 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -46,7 +46,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } buh.State = state - if state.Name != ctx.Repository.Name() { + if state.Name != ctx.Repository.Name().Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) @@ -312,7 +312,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. } // TODO(stevvooe): Need a better way to manage the upload state automatically. - buh.State.Name = buh.Repository.Name() + buh.State.Name = buh.Repository.Name().Name() buh.State.UUID = buh.Upload.ID() buh.State.Offset = offset buh.State.StartedAt = buh.Upload.StartedAt() diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 51156d3b..9b4e5399 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" @@ -173,7 +174,17 @@ func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2 return nil, err } - builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, imh.Repository.Name(), imh.Tag, configJSON) + ref := imh.Repository.Name() + + if imh.Tag != "" { + ref, err = reference.WithTag(imh.Repository.Name(), imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) + return nil, err + } + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) for _, d := range schema2Manifest.References() { if err := builder.AppendReference(d); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index d9f0106c..72c21bbe 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -40,7 +40,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()})) + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name().Name()})) default: th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } @@ -51,7 +51,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { enc := json.NewEncoder(w) if err := enc.Encode(tagsAPIResponse{ - Name: th.Repository.Name(), + Name: 
th.Repository.Name().Name(), Tags: tags, }); err != nil { th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index 41b76e8e..278e5864 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -133,7 +134,7 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, if err := pbs.storeLocal(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) } - pbs.scheduler.AddBlob(dgst.String(), repositoryTTL) + pbs.scheduler.AddBlob(dgst, repositoryTTL) }(dgst) _, err = pbs.copyContent(ctx, dgst, w) @@ -169,7 +170,7 @@ func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution. return nil, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { return distribution.Descriptor{}, distribution.ErrUnsupported } diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index 7702771c..978f878e 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" @@ -114,6 +115,11 @@ func (te *testEnv) RemoteStats() *map[string]int { // Populate remote store and record the digests func makeTestEnv(t *testing.T, name string) *testEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + ctx := context.Background() truthDir, err := ioutil.TempDir("", "truth") @@ -131,7 +137,7 @@ func makeTestEnv(t *testing.T, name string) *testEnv { if err != nil { t.Fatalf("error creating registry: %v", err) } - localRepo, err := localRegistry.Repository(ctx, name) + localRepo, err := localRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -140,7 +146,7 @@ func makeTestEnv(t *testing.T, name string) *testEnv { if err != nil { t.Fatalf("error creating registry: %v", err) } - truthRepo, err := truthRegistry.Repository(ctx, name) + truthRepo, err := truthRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 13cb5f6b..e0a5ac28 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -6,6 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -16,7 +17,7 @@ type proxyManifestStore struct { ctx context.Context localManifests distribution.ManifestService remoteManifests 
distribution.ManifestService - repositoryName string + repositoryName reference.Named scheduler *scheduler.TTLExpirationScheduler } @@ -65,7 +66,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) // Ensure the manifest blob is cleaned up - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) + pms.scheduler.AddBlob(dgst, repositoryTTL) } return manifest, err diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index aeecae10..5e717bf0 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -9,6 +9,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" @@ -64,12 +65,17 @@ func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, */ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + ctx := context.Background() truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } - truthRepo, err := truthRegistry.Repository(ctx, name) + truthRepo, err := truthRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -91,7 +97,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE if err != nil { t.Fatalf("error creating registry: %v", err) } - localRepo, err := localRegistry.Repository(ctx, name) + localRepo, err := localRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index 8e1be5f2..1b3fcf32 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" @@ -71,9 +72,9 @@ func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, la return pr.embedded.Repositories(ctx, repos, last) } -func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) { +func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull"))) + auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name.Name(), "pull"))) localRepo, err := pr.embedded.Repository(ctx, name) if err != nil { @@ -121,7 +122,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distri type proxiedRepository struct { blobStore 
distribution.BlobStore manifests distribution.ManifestService - name string + name reference.Named tags distribution.TagService } @@ -133,7 +134,7 @@ func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { return pr.blobStore } -func (pr *proxiedRepository) Name() string { +func (pr *proxiedRepository) Name() reference.Named { return pr.name } diff --git a/docs/proxy/scheduler/scheduler.go b/docs/proxy/scheduler/scheduler.go index e91920a1..f5334907 100644 --- a/docs/proxy/scheduler/scheduler.go +++ b/docs/proxy/scheduler/scheduler.go @@ -7,6 +7,8 @@ import ( "time" "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" ) @@ -80,19 +82,19 @@ func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { } // AddBlob schedules a blob cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { +func (ttles *TTLExpirationScheduler) AddBlob(dgst digest.Digest, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() if ttles.stopped { return fmt.Errorf("scheduler not started") } - ttles.add(dgst, ttl, entryTypeBlob) + ttles.add(dgst.String(), ttl, entryTypeBlob) return nil } // AddManifest schedules a manifest cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { +func (ttles *TTLExpirationScheduler) AddManifest(repoName reference.Named, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() @@ -100,7 +102,7 @@ func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Durat return fmt.Errorf("scheduler not started") } - ttles.add(repoName, ttl, entryTypeManifest) + ttles.add(repoName.Name(), ttl, entryTypeManifest) return nil } diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index e1eacc00..246648b0 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -27,7 +27,7 @@ func TestSimpleBlobUpload(t *testing.T) { } ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -209,7 +209,7 @@ func TestSimpleBlobUpload(t *testing.T) { // other tests. 
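The storage test changes above all reduce to one pattern: parse the repository name into a reference.Named once, up front, and recover the raw string with Name() only where path specs or log messages need it. A minimal, runnable sketch of that round trip; the name "foo/bar" is a placeholder, and the explicit error handling is what the tests elide with the blank identifier:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/docker/distribution/reference"
    )

    func main() {
    	// A well-formed repository name parses into a typed reference.
    	named, err := reference.ParseNamed("foo/bar")
    	if err != nil {
    		log.Fatalf("invalid repository name: %v", err)
    	}
    	// The raw string is still available at the path-spec boundary.
    	fmt.Println(named.Name()) // foo/bar

    	// Malformed names now fail here, before any registry or
    	// storage code sees them.
    	if _, err := reference.ParseNamed("Foo/BAR"); err != nil {
    		fmt.Println("rejected:", err)
    	}
    }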
func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -320,8 +320,8 @@ func TestBlobMount(t *testing.T) { } ctx := context.Background() - imageName := "foo/bar" - sourceImageName := "foo/source" + imageName, _ := reference.ParseNamed("foo/bar") + sourceImageName, _ := reference.ParseNamed("foo/source") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -378,11 +378,7 @@ func TestBlobMount(t *testing.T) { t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) } - namedRef, err := reference.ParseNamed(sourceRepository.Name()) - if err != nil { - t.Fatal(err) - } - canonicalRef, err := reference.WithDigest(namedRef, desc.Digest) + canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest) if err != nil { t.Fatal(err) } @@ -476,7 +472,7 @@ func TestBlobMount(t *testing.T) { // TestLayerUploadZeroLength uploads zero-length func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 37903176..e485cc6d 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -326,7 +326,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // resources are already not present, no error will be returned. func (bw *blobWriter) removeResources(ctx context.Context) error { dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Name().Name(), id: bw.id, }) diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go index d33f544d..fc62bcc4 100644 --- a/docs/storage/blobwriter_resumable.go +++ b/docs/storage/blobwriter_resumable.go @@ -113,7 +113,7 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Name().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, @@ -159,7 +159,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { } uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Name().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index a1f8724d..0c0c622c 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -142,7 +142,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. 
} if opts.Mount.ShouldMount { - desc, err := lbs.mount(ctx, opts.Mount.From.Name(), opts.Mount.From.Digest()) + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest()) if err == nil { // Mount successful, no need to initiate an upload session return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} @@ -153,7 +153,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. startedAt := time.Now().UTC() path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: uuid, }) @@ -162,7 +162,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. } startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: uuid, }) @@ -182,7 +182,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: id, }) @@ -206,7 +206,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution } path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: id, }) @@ -236,7 +236,7 @@ func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) erro return nil } -func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo string, dgst digest.Digest) (distribution.Descriptor, error) { +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { repo, err := lbs.registry.Repository(ctx, sourceRepo) if err != nil { return distribution.Descriptor{}, err @@ -298,7 +298,7 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution } seenDigests[dgst] = struct{}{} - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return err } @@ -368,7 +368,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { // clear any possible existence of a link described in linkPathFns for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return err } @@ -391,7 +391,7 @@ func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (er // linkPathFuncs to let us try a few different paths before returning not // found. func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return "", err } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 31daa83c..33c0c351 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -77,7 +77,7 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. 
if err != nil { if err == distribution.ErrBlobUnknown { return nil, distribution.ErrManifestUnknownRevision{ - Name: ms.repository.Name(), + Name: ms.repository.Name().Name(), Revision: dgst, } } diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index a41feb04..7885c466 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -23,11 +24,11 @@ type manifestStoreTestEnv struct { driver driver.StorageDriver registry distribution.Namespace repository distribution.Repository - name string + name reference.Named tag string } -func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider( @@ -52,7 +53,8 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE } func TestManifestStorage(t *testing.T) { - env := newManifestStoreTestEnv(t, "foo/bar", "thetag") + repoName, _ := reference.ParseNamed("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag") ctx := context.Background() ms, err := env.repository.Manifests(ctx) if err != nil { @@ -63,7 +65,7 @@ func TestManifestStorage(t *testing.T) { Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: env.name, + Name: env.name.Name(), Tag: env.tag, } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 869895dd..be570cbc 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -107,18 +107,11 @@ func (reg *registry) Scope() distribution.Scope { // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { - if _, err := reference.ParseNamed(canonicalName); err != nil { - return nil, distribution.ErrRepositoryNameInvalid{ - Name: canonicalName, - Reason: err, - } - } - +func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { var descriptorCache distribution.BlobDescriptorService if reg.blobDescriptorCacheProvider != nil { var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) if err != nil { return nil, err } @@ -136,12 +129,12 @@ func (reg *registry) Repository(ctx context.Context, canonicalName string) (dist type repository struct { *registry ctx context.Context - name string + name reference.Named descriptorCache distribution.BlobDescriptorService } // Name returns the name of the repository. 
-func (repo *repository) Name() string { +func (repo *repository) Name() reference.Named { return repo.name } diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index ede4e0e2..205d6009 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -16,7 +16,7 @@ type signatureStore struct { func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Name(), + name: s.repository.Name().Name(), revision: dgst, }) diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index df6e8dfa..8381d244 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -26,7 +26,7 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { var tags []string pathSpec, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), }) if err != nil { return tags, err @@ -36,7 +36,7 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name().Name()} default: return tags, err } @@ -53,7 +53,7 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { // exists returns true if the specified manifest tag exists in the repository. func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), tag: tag, }) @@ -73,7 +73,7 @@ func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { // the current tag. The digest must point to a manifest. func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), tag: tag, }) @@ -95,7 +95,7 @@ func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descr // resolve the current revision for name and tag. 
 func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
 	currentPath, err := pathFor(manifestTagCurrentPathSpec{
-		name: ts.repository.Name(),
+		name: ts.repository.Name().Name(),
 		tag: tag,
 	})

@@ -119,7 +119,7 @@ func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descripto
 // Untag removes the tag association
 func (ts *tagStore) Untag(ctx context.Context, tag string) error {
 	tagPath, err := pathFor(manifestTagPathSpec{
-		name: ts.repository.Name(),
+		name: ts.repository.Name().Name(),
 		tag: tag,
 	})

@@ -172,7 +172,7 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([
 	var tags []string
 	for _, tag := range allTags {
 		tagLinkPathSpec := manifestTagCurrentPathSpec{
-			name: ts.repository.Name(),
+			name: ts.repository.Name().Name(),
 			tag: tag,
 		}

diff --git a/docs/storage/tagstore_test.go b/docs/storage/tagstore_test.go
index c257adea..52873a69 100644
--- a/docs/storage/tagstore_test.go
+++ b/docs/storage/tagstore_test.go
@@ -5,6 +5,7 @@ import (

 	"github.com/docker/distribution"
 	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/storage/driver/inmemory"
 )

@@ -21,7 +22,8 @@ func testTagStore(t *testing.T) *tagsTestEnv {
 		t.Fatal(err)
 	}

-	repo, err := reg.Repository(ctx, "a/b")
+	repoRef, _ := reference.ParseNamed("a/b")
+	repo, err := reg.Repository(ctx, repoRef)
 	if err != nil {
 		t.Fatal(err)
 	}

From 6149a8c6343f01352876e2c91cc0281547abc823 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Tue, 15 Dec 2015 16:43:13 -0800
Subject: [PATCH 379/501] Change URLBuilder methods to use references for tags
 and digests

Signed-off-by: Aaron Lehmann
---
 docs/api/v2/urls.go         | 17 +++++---
 docs/api/v2/urls_test.go    |  6 ++-
 docs/client/repository.go   | 75 +++++++++++++++++++++++++++---------
 docs/handlers/api_test.go   | 77 ++++++++++++++++++++++++-------------
 docs/handlers/blobupload.go |  6 ++-
 docs/handlers/images.go     |  8 +++-
 6 files changed, 134 insertions(+), 55 deletions(-)

diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go
index 5b63ccaa..408c7b74 100644
--- a/docs/api/v2/urls.go
+++ b/docs/api/v2/urls.go
@@ -5,7 +5,6 @@ import (
 	"net/url"
 	"strings"

-	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/reference"
 	"github.com/gorilla/mux"
 )
@@ -127,10 +126,18 @@ func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {

 // BuildManifestURL constructs a url for the manifest identified by name and
 // reference. The argument reference may be either a tag or digest.
-func (ub *URLBuilder) BuildManifestURL(name reference.Named, reference string) (string, error) {
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
 	route := ub.cloneRoute(RouteNameManifest)

-	manifestURL, err := route.URL("name", name.Name(), "reference", reference)
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
 	if err != nil {
 		return "", err
 	}
@@ -139,10 +146,10 @@ func (ub *URLBuilder) BuildManifestURL(name reference.Named, reference string) (
 }

 // BuildBlobURL constructs the url for the blob identified by name and dgst.
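Under the single-argument BuildManifestURL above, a caller selects between the two manifest URL shapes purely by the type of reference it constructs; a bare Named that is neither tagged nor digested falls through with an empty reference field, so names are expected to be wrapped first. A short sketch against a placeholder registry root (errors elided for brevity):

    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/digest"
    	"github.com/docker/distribution/reference"
    	"github.com/docker/distribution/registry/api/v2"
    )

    func main() {
    	// The base URL is a placeholder; any registry root works.
    	ub, err := v2.NewURLBuilderFromString("http://localhost:5000")
    	if err != nil {
    		panic(err)
    	}
    	named, _ := reference.ParseNamed("foo/bar")

    	tagged, _ := reference.WithTag(named, "latest")
    	tagURL, _ := ub.BuildManifestURL(tagged)
    	fmt.Println(tagURL) // .../v2/foo/bar/manifests/latest

    	canonical, _ := reference.WithDigest(named, digest.DigestSha256EmptyTar)
    	digestURL, _ := ub.BuildManifestURL(canonical)
    	fmt.Println(digestURL) // .../v2/foo/bar/manifests/sha256:a3ed...
    }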
-func (ub *URLBuilder) BuildBlobURL(name reference.Named, dgst digest.Digest) (string, error) { +func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { route := ub.cloneRoute(RouteNameBlob) - layerURL, err := route.URL("name", name.Name(), "digest", dgst.String()) + layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) if err != nil { return "", err } diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index 7dab00fc..1af1f261 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -33,14 +33,16 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", build: func() (string, error) { - return urlBuilder.BuildManifestURL(fooBarRef, "tag") + ref, _ := reference.WithTag(fooBarRef, "tag") + return urlBuilder.BuildManifestURL(ref) }, }, { description: "build blob url", expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", build: func() (string, error) { - return urlBuilder.BuildBlobURL(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(ref) }, }, { diff --git a/docs/client/repository.go b/docs/client/repository.go index 43826907..1f777add 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -249,7 +249,11 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e // to construct a descriptor for the tag. If the registry doesn't support HEADing // a manifest, fallback to GET. func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - u, err := t.ub.BuildManifestURL(t.name, tag) + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) if err != nil { return distribution.Descriptor{}, err } @@ -296,7 +300,11 @@ type manifests struct { } func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return false, err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return false, err } @@ -333,11 +341,19 @@ func (o etagOption) Apply(ms distribution.ManifestService) error { } func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + ) - var tag string for _, option := range options { if opt, ok := option.(withTagOption); ok { - tag = opt.tag + digestOrTag = opt.tag + ref, err = reference.WithTag(ms.name, opt.tag) + if err != nil { + return nil, err + } } else { err := option.Apply(ms) if err != nil { @@ -346,14 +362,15 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } } - var ref string - if tag != "" { - ref = tag - } else { - ref = dgst.String() + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } } - u, err := ms.ub.BuildManifestURL(ms.name, ref) + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err } @@ -367,8 +384,8 @@ func (ms *manifests) Get(ctx context.Context, dgst 
digest.Digest, options ...dis req.Header.Add("Accept", t) } - if _, ok := ms.etags[ref]; ok { - req.Header.Set("If-None-Match", ms.etags[ref]) + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) } resp, err := ms.client.Do(req) @@ -412,11 +429,15 @@ func (o withTagOption) Apply(m distribution.ManifestService) error { // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the // tag name in order to build the correct upload URL. This state is written and read under a lock. func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - var tag string + ref := ms.name for _, option := range options { if opt, ok := option.(withTagOption); ok { - tag = opt.tag + var err error + ref, err = reference.WithTag(ref, opt.tag) + if err != nil { + return "", err + } } else { err := option.Apply(ms) if err != nil { @@ -425,7 +446,7 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . } } - manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag) + manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { return "", err } @@ -462,7 +483,11 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . } func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return err } @@ -527,7 +552,11 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { } func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return nil, err } @@ -668,7 +697,11 @@ type blobStatter struct { } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - u, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) if err != nil { return distribution.Descriptor{}, err } @@ -716,7 +749,11 @@ func buildCatalogValues(maxEntries int, last string) url.Values { } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return err } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index b59db6cc..5fffaa5a 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -301,7 +301,8 @@ func TestBlobDeleteDisabled(t *testing.T) { imageName := args.imageName layerDigest := args.layerDigest - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("error building url: %v", err) } @@ -324,7 +325,8 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ----------------------------------- // Test fetch for non-existent content - layerURL, err := 
env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("error building url: %v", err) } @@ -534,7 +536,8 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { layerFile := args.layerFile layerDigest := args.layerDigest - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf(err.Error()) } @@ -617,7 +620,8 @@ func TestDeleteDisabled(t *testing.T) { t.Fatalf("error creating random layer file: %v", err) } - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("Error building blob URL") } @@ -642,7 +646,8 @@ func TestDeleteReadOnly(t *testing.T) { t.Fatalf("error creating random layer file: %v", err) } - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("Error building blob URL") } @@ -737,7 +742,8 @@ func TestManifestDeleteDisabled(t *testing.T) { } func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { - manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar) + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + manifestURL, err := env.builder.BuildManifestURL(ref) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -755,7 +761,8 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name tag := "thetag" args := manifestArgs{imageName: imageName} - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -879,7 +886,8 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name args.manifest = signedManifest args.dgst = dgst - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) @@ -1075,7 +1083,8 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name mediaType: schema2.MediaTypeManifest, } - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -1219,7 +1228,8 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name args.dgst = dgst args.manifest = deserializedManifest - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) @@ -1415,7 +1425,8 @@ func 
testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) imageName := args.imageName tag := "manifestlisttag" - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -1468,7 +1479,8 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) } dgst := digest.FromBytes(canonical) - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) @@ -1637,8 +1649,9 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { imageName := args.imageName dgst := args.dgst manifest := args.manifest - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + ref, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(ref) // --------------- // Delete by digest resp, err := httpDelete(manifestDigestURL) @@ -1686,8 +1699,9 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { // --------------- // Attempt to delete an unknown manifest - unknownDigest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - unknownManifestDigestURL, err := env.builder.BuildManifestURL(imageName, unknownDigest) + unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + unknownRef, _ := reference.WithDigest(imageName, unknownDigest) + unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) checkErr(t, err, "building unknown manifest url") resp, err = httpDelete(unknownManifestDigestURL) @@ -1695,11 +1709,12 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) // -------------------- - // Uupload manifest by tag + // Upload manifest by tag tag := "atag" - manifestTagURL, err := env.builder.BuildManifestURL(imageName, tag) - resp = putManifest(t, "putting signed manifest by tag", manifestTagURL, args.mediaType, manifest) - checkResponse(t, "putting signed manifest by tag", resp, http.StatusCreated) + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := env.builder.BuildManifestURL(tagRef) + resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) + checkResponse(t, "putting manifest by tag", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -1943,7 +1958,8 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst diges sha256Dgst := digester.Digest() - expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst) + ref, _ := reference.WithDigest(name, sha256Dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) if err != nil { t.Fatalf("error building expected layer url: %v", err) } @@ -1966,7 +1982,8 @@ func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadU checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - expectedLayerURL, err := ub.BuildBlobURL(name, dgst) + ref, _ 
:= reference.WithDigest(name, dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) if err != nil { t.Fatalf("error building expected layer url: %v", err) } @@ -2189,10 +2206,12 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) dgst := digest.FromBytes(signedManifest.Canonical) // Create this repository by tag to ensure the tag mapping is made in the registry - manifestDigestURL, err := env.builder.BuildManifestURL(imageNameRef, tag) + tagRef, _ := reference.WithTag(imageNameRef, tag) + manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) checkErr(t, err, "building manifest url") - location, err := env.builder.BuildManifestURL(imageNameRef, dgst.String()) + digestRef, _ := reference.WithDigest(imageNameRef, dgst) + location, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building location URL") resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) @@ -2212,7 +2231,8 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error building base url: %v", err) } @@ -2255,7 +2275,8 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Blob Delete - blobURL, err := env.builder.BuildBlobURL(imageName, digest.DigestSha256EmptyTar) + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + blobURL, err := env.builder.BuildBlobURL(ref) resp, err = httpDelete(blobURL) checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) @@ -2316,14 +2337,16 @@ func TestProxyManifestGetByTag(t *testing.T) { proxyEnv := newTestEnvWithConfig(t, &proxyConfig) - manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(imageName, dgst.String()) + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp, err := http.Get(manifestDigestURL) checkErr(t, err, "fetching manifest from proxy by digest") defer resp.Body.Close() - manifestTagURL, err := proxyEnv.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) checkErr(t, err, "building manifest url") resp, err = http.Get(manifestTagURL) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 56403dd9..a42e57f6 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -372,7 +372,11 @@ func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string // created blob. A 201 Created is written as well as the canonical URL and // blob digest. 
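The conversion running through these hunks is the heart of the change: URL builders now take typed references instead of loose name/tag or name/digest pairs, so malformed input is rejected when the reference is constructed rather than surfacing later as a bad URL. A minimal sketch of the resulting calling convention, assuming a *v2.URLBuilder ub, a reference.Named name, and a digest.Digest dgst are in scope:

	tagRef, err := reference.WithTag(name, "latest")
	if err != nil {
		// invalid tag, caught before any URL is formed
	}
	manifestURL, err := ub.BuildManifestURL(tagRef)

	canonicalRef, err := reference.WithDigest(name, dgst)
	if err != nil {
		// invalid digest
	}
	blobURL, err := ub.BuildBlobURL(canonicalRef)

The tests above discard the construction error with a blank identifier because their inputs are known-good; production callers such as writeBlobCreatedHeaders below check it.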
func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { - blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) + ref, err := reference.WithDigest(buh.Repository.Name(), desc.Digest) + if err != nil { + return err + } + blobURL, err := buh.urlBuilder.BuildBlobURL(ref) if err != nil { return err } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 9b4e5399..808ead54 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -289,7 +289,13 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http } // Construct a canonical url for the uploaded manifest. - location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String()) + ref, err := reference.WithDigest(imh.Repository.Name(), imh.Digest) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + location, err := imh.urlBuilder.BuildManifestURL(ref) if err != nil { // NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to // happen. We'll log the error here but proceed as if it worked. Worst From 586b3d47a780c7976e1e5c02416fc5c7a950be57 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Thu, 21 Jan 2016 11:28:02 +0000 Subject: [PATCH 380/501] Storage: blobwriter.Write/Seek test case Signed-off-by: Arthur Baars --- docs/storage/blob_test.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index c6cfbcda..1b885eec 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -17,6 +17,39 @@ import ( "github.com/docker/distribution/testutil" ) +// TestWriteSeek tests that the current file size can be +// obtained using Seek +func TestWriteSeek(t *testing.T) { + ctx := context.Background() + imageName := "foo/bar" + driver := inmemory.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + contents := []byte{1, 2, 3} + blobUpload.Write(contents) + offset, err := blobUpload.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("unexpected error in blobUpload.Seek: %s", err) + } + if offset != int64(len(contents)) { + t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) + } + +} + // TestSimpleBlobUpload covers the blob upload process, exercising common // error paths that might be seen during an upload.
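TestWriteSeek leans on a general io.Seeker idiom: seeking zero bytes from the current position returns the absolute offset without moving it, which for an append-style writer equals the number of bytes written so far. A hedged, standalone illustration of the same trick using a temp file rather than a blob upload:

	f, _ := ioutil.TempFile("", "seek-demo") // illustrative; any io.WriteSeeker behaves the same
	defer os.Remove(f.Name())

	f.Write([]byte{1, 2, 3})
	size, _ := f.Seek(0, os.SEEK_CUR) // 3: the current offset doubles as the size written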
func TestSimpleBlobUpload(t *testing.T) { From 7dee3d19d9845f94c526429ed10b8d07214ca0f0 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Wed, 20 Jan 2016 15:15:22 +0000 Subject: [PATCH 381/501] Storage: remove bufferedFileWriter (dead code) Signed-off-by: Arthur Baars --- docs/storage/blobwriter.go | 18 ++++++------ docs/storage/filewriter.go | 49 ++------------------------------- docs/storage/filewriter_test.go | 40 ++------------------------- docs/storage/linkedblobstore.go | 2 +- 4 files changed, 14 insertions(+), 95 deletions(-) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 37903176..20171165 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -30,7 +30,7 @@ type blobWriter struct { // implements io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy // LayerUpload Interface - bufferedFileWriter + fileWriter resumableDigestEnabled bool } @@ -51,7 +51,7 @@ func (bw *blobWriter) StartedAt() time.Time { func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { context.GetLogger(ctx).Debug("(*blobWriter).Commit") - if err := bw.bufferedFileWriter.Close(); err != nil { + if err := bw.fileWriter.Close(); err != nil { return distribution.Descriptor{}, err } @@ -100,7 +100,7 @@ func (bw *blobWriter) Write(p []byte) (int, error) { return 0, err } - n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) + n, err := io.MultiWriter(&bw.fileWriter, bw.digester.Hash()).Write(p) bw.written += int64(n) return n, err @@ -114,7 +114,7 @@ func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { return 0, err } - nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) + nn, err := bw.fileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) bw.written += nn return nn, err @@ -129,7 +129,7 @@ func (bw *blobWriter) Close() error { return err } - return bw.bufferedFileWriter.Close() + return bw.fileWriter.Close() } // validateBlob checks the data against the digest, returning an error if it @@ -149,7 +149,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } // Stat the on disk file - if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil { + if fi, err := bw.fileWriter.driver.Stat(ctx, bw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // NOTE(stevvooe): We really don't care if the file is @@ -223,7 +223,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } // Read the file from the backend driver and validate it.
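Note how both write paths keep a running digest without a second pass over the data: Write fans out through io.MultiWriter, while ReadFrom taps the incoming stream with io.TeeReader. A minimal sketch of the technique, assuming only the standard library:

	h := sha256.New()
	var dst bytes.Buffer

	// Write path: every byte lands in dst and in the hash.
	io.MultiWriter(&dst, h).Write([]byte("hello"))

	// ReadFrom path: bytes are hashed as they stream out of the source.
	io.Copy(&dst, io.TeeReader(strings.NewReader(" world"), h))

	sum := h.Sum(nil) // digest of everything written, computed inline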
- fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size) + fr, err := newFileReader(ctx, bw.fileWriter.driver, bw.path, desc.Size) if err != nil { return distribution.Descriptor{}, err } @@ -357,7 +357,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) { // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 try := 1 for try <= 5 { - _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) + _, err := bw.fileWriter.driver.Stat(bw.ctx, bw.path) if err == nil { break } @@ -371,7 +371,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) { } } - readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) + readCloser, err := bw.fileWriter.driver.ReadStream(bw.ctx, bw.path, 0) if err != nil { return nil, err } diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go index 529fa673..7c68f346 100644 --- a/docs/storage/filewriter.go +++ b/docs/storage/filewriter.go @@ -1,7 +1,6 @@ package storage import ( - "bufio" "bytes" "fmt" "io" @@ -11,10 +10,6 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) -const ( - fileWriterBufferSize = 5 << 20 -) - // fileWriter implements a remote file writer backed by a storage driver. type fileWriter struct { driver storagedriver.StorageDriver @@ -30,11 +25,6 @@ type fileWriter struct { err error // terminal error, if set, reader is closed } -type bufferedFileWriter struct { - fileWriter - bw *bufio.Writer -} - // fileWriterInterface makes the desired io compliant interface that the // filewriter should implement. type fileWriterInterface interface { @@ -47,7 +37,7 @@ var _ fileWriterInterface = &fileWriter{} // newFileWriter returns a prepared fileWriter for the driver and path. This // could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { +func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*fileWriter, error) { fw := fileWriter{ driver: driver, path: path, @@ -69,42 +59,7 @@ func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path fw.size = fi.Size() } - buffered := bufferedFileWriter{ - fileWriter: fw, - } - buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) - - return &buffered, nil -} - -// wraps the fileWriter.Write method to buffer small writes -func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { - return bfw.bw.Write(p) -} - -// wraps fileWriter.Close to ensure the buffer is flushed -// before we close the writer. -func (bfw *bufferedFileWriter) Close() (err error) { - if err = bfw.Flush(); err != nil { - return err - } - err = bfw.fileWriter.Close() - return err -} - -// wraps fileWriter.Seek to ensure offset is handled -// correctly in respect to pending data in the buffer -func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { - if err := bfw.Flush(); err != nil { - return 0, err - } - return bfw.fileWriter.Seek(offset, whence) -} - -// wraps bufio.Writer.Flush to allow intermediate flushes -// of the bufferedFileWriter -func (bfw *bufferedFileWriter) Flush() error { - return bfw.bw.Flush() + return &fw, nil } // Write writes the buffer p at the current write offset. 
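With the wrapper gone, newFileWriter hands back the plain fileWriter. Should write batching ever be wanted again, it composes externally instead of being baked into the type; a hedged sketch, assuming fw is the *fileWriter returned above:

	bw := bufio.NewWriterSize(fw, 5<<20) // 5 MB, the old fileWriterBufferSize
	bw.Write(p)
	// The buffer must be flushed before any Seek or Close on fw,
	// which is precisely the bookkeeping the deleted type existed to hide.
	if err := bw.Flush(); err != nil {
		// handle the flush failure
	}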
diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go index 858b0327..d6782cd4 100644 --- a/docs/storage/filewriter_test.go +++ b/docs/storage/filewriter_test.go @@ -45,7 +45,6 @@ func TestSimpleWrite(t *testing.T) { if err != nil { t.Fatalf("unexpected error writing content: %v", err) } - fw.Flush() if n != len(content) { t.Fatalf("unexpected write length: %d != %d", n, len(content)) } @@ -163,41 +162,6 @@ func TestSimpleWrite(t *testing.T) { } } -func TestBufferedFileWriter(t *testing.T) { - ctx := context.Background() - writer, err := newFileWriter(ctx, inmemory.New(), "/random") - - if err != nil { - t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - // write one byte and ensure the offset hasn't been incremented. - // offset will only get incremented when the buffer gets flushed - short := []byte{byte(1)} - - writer.Write(short) - - if writer.offset > 0 { - t.Fatalf("WriteStream called prematurely") - } - - // write enough data to cause the buffer to flush and confirm - // the offset has been incremented - long := make([]byte, fileWriterBufferSize) - _, err = rand.Read(long) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - for i := range long { - long[i] = byte(i) - } - writer.Write(long) - writer.Close() - if writer.offset != (fileWriterBufferSize + 1) { - t.Fatalf("WriteStream not called when buffer capacity reached") - } -} - func BenchmarkFileWriter(b *testing.B) { b.StopTimer() // not sure how long setup above will take for i := 0; i < b.N; i++ { @@ -237,14 +201,14 @@ func BenchmarkFileWriter(b *testing.B) { } } -func BenchmarkBufferedFileWriter(b *testing.B) { +func BenchmarkfileWriter(b *testing.B) { b.StopTimer() // not sure how long setup above will take ctx := context.Background() for i := 0; i < b.N; i++ { bfw, err := newFileWriter(ctx, inmemory.New(), "/random") if err != nil { - b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) + b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) } randomBytes := make([]byte, 1<<20) diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 430da1ca..908f248b 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -197,7 +197,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string id: uuid, startedAt: startedAt, digester: digest.Canonical.New(), - bufferedFileWriter: *fw, + fileWriter: *fw, resumableDigestEnabled: lbs.resumableDigestEnabled, } From 1eed0ddd072a3954bcbd879266c0053727056528 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 25 Jan 2016 20:11:41 -0800 Subject: [PATCH 382/501] Update token header struct to use json.RawMessage pointer Since RawMessage json receivers take a pointer type, the Header structure should use pointers in order to call the json.RawMessage marshal and unmarshal functions Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/auth/token/token.go | 16 ++++++++-------- docs/auth/token/token_test.go | 5 +++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/docs/auth/token/token.go b/docs/auth/token/token.go index 166816ee..2598f362 100644 --- a/docs/auth/token/token.go +++ b/docs/auth/token/token.go @@ -52,11 +52,11 @@ type ClaimSet struct { // Header describes the header section of a JSON Web Token.
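The subtlety motivating this change: json.RawMessage declares its MarshalJSON and UnmarshalJSON on the pointer receiver, so a pointer-typed field is what reliably routes both directions through the raw passthrough (a value field can fall back to the default []byte handling and emit base64). A hedged round-trip sketch using only the standard library:

	type header struct {
		JWK *json.RawMessage `json:"jwk,omitempty"`
	}

	raw := json.RawMessage(`{"kty":"EC"}`)
	b, _ := json.Marshal(header{JWK: &raw})
	// b is {"jwk":{"kty":"EC"}}: the bytes pass through verbatim

	var h header
	json.Unmarshal(b, &h) // *h.JWK holds the raw bytes again

A nil pointer also gives a cheap presence test, which is what VerifySigningKey switches on below (rawJWK != nil).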
type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK json.RawMessage `json:"jwk,omitempty"` + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK *json.RawMessage `json:"jwk,omitempty"` } // Token describes a JSON Web Token. @@ -193,7 +193,7 @@ func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust. switch { case len(x5c) > 0: signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case len(rawJWK) > 0: + case rawJWK != nil: signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) case len(keyID) > 0: signingKey = verifyOpts.TrustedKeys[keyID] @@ -266,8 +266,8 @@ func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtru return } -func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) +func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) if err != nil { return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) } diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go index 119aa738..9a418295 100644 --- a/docs/auth/token/token_test.go +++ b/docs/auth/token/token_test.go @@ -97,7 +97,8 @@ func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey l return nil, fmt.Errorf("unable to make signing key with chain: %s", err) } - rawJWK, err := signingKey.PublicKey().MarshalJSON() + var rawJWK json.RawMessage + rawJWK, err = signingKey.PublicKey().MarshalJSON() if err != nil { return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) } @@ -105,7 +106,7 @@ func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey l joseHeader := &Header{ Type: "JWT", SigningAlg: "ES256", - RawJWK: json.RawMessage(rawJWK), + RawJWK: &rawJWK, } now := time.Now() From f757372dd81140683a22d6d1cd83232889bad878 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 26 Jan 2016 14:20:23 -0800 Subject: [PATCH 383/501] Add manifest put by digest to the registry client Signed-off-by: Richard Scothern --- docs/client/repository.go | 21 +++++++++++++++++---- docs/client/repository_test.go | 21 +++++++++++++++++++++ 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 1f777add..5da2239f 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -427,9 +427,10 @@ func (o withTagOption) Apply(m distribution.ManifestService) error { } // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. This state is written and read under a lock. +// tag name in order to build the correct upload URL. func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { ref := ms.name + var tagged bool for _, option := range options { if opt, ok := option.(withTagOption); ok { @@ -438,6 +439,7 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options .
if err != nil { return "", err } + tagged = true } else { err := option.Apply(ms) if err != nil { @@ -445,13 +447,24 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . } } } - - manifestURL, err := ms.ub.BuildManifestURL(ref) + mediaType, p, err := m.Payload() if err != nil { return "", err } - mediaType, p, err := m.Payload() + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { return "", err } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index b7b782c7..df26b631 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -815,6 +815,7 @@ func TestManifestPut(t *testing.T) { if err != nil { t.Fatal(err) } + var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -831,6 +832,22 @@ func TestManifestPut(t *testing.T) { }, }) + putDgst := digest.FromBytes(m1.Canonical) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/" + putDgst.String(), + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {putDgst.String()}, + }), + }, + }) + e, c := testServer(m) defer c() @@ -848,6 +865,10 @@ func TestManifestPut(t *testing.T) { t.Fatal(err) } + if _, err := ms.Put(ctx, m1); err != nil { + t.Fatal(err) + } + // TODO(dmcgowan): Check for invalid input error } From a7740f5d0f246f00c17437d7cfb952a299f1b416 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 26 Jan 2016 14:50:38 -0800 Subject: [PATCH 384/501] Correct test digest lengths and enable all unit tests Signed-off-by: Richard Scothern --- docs/storage/cache/cachecheck/suite.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/storage/cache/cachecheck/suite.go b/docs/storage/cache/cachecheck/suite.go index 42390953..13e9c132 100644 --- a/docs/storage/cache/cachecheck/suite.go +++ b/docs/storage/cache/cachecheck/suite.go @@ -17,6 +17,7 @@ func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCachePr checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) checkBlobDescriptorCacheSetAndRead(t, ctx, provider) + checkBlobDescriptorCacheClear(t, ctx, provider) } func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { @@ -141,10 +142,10 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi } } -func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc") +func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { + localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ - Digest: "sha256:abc", + Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", Size: 10, MediaType: "application/octet-stream"} @@ -168,12 +169,11 @@ func checkBlobDescriptorClear(t *testing.T, ctx 
context.Context, provider cache. err = cache.Clear(ctx, localDigest) if err != nil { - t.Fatalf("unexpected error deleting descriptor") + t.Error(err) } - nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - err = cache.Clear(ctx, nonExistantDigest) + desc, err = cache.Stat(ctx, localDigest) if err == nil { - t.Fatalf("expected error deleting unknown descriptor") + t.Fatalf("expected error statting deleted blob: %v", err) } } From 3e570e59f1cc01bdf82e9bf2bb2ea98b7acd020f Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 26 Jan 2016 16:42:10 -0800 Subject: [PATCH 385/501] Invalidate the blob store descriptor caches when content is removed from the proxy. Also, switch to reference in the scheduler API. Signed-off-by: Richard Scothern --- docs/proxy/proxyblobstore.go | 16 ++++-- docs/proxy/proxyblobstore_test.go | 7 ++- docs/proxy/proxymanifeststore.go | 12 +++- docs/proxy/proxymanifeststore_test.go | 1 + docs/proxy/proxyregistry.go | 65 +++++++++++++++++---- docs/proxy/scheduler/scheduler.go | 34 ++++++----- docs/proxy/scheduler/scheduler_test.go | 79 +++++++++++++++++--------- 7 files changed, 152 insertions(+), 62 deletions(-) diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index 278e5864..1d7dfbc6 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -18,9 +18,10 @@ import ( const blobTTL = time.Duration(24 * 7 * time.Hour) type proxyBlobStore struct { - localStore distribution.BlobStore - remoteStore distribution.BlobService - scheduler *scheduler.TTLExpirationScheduler + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler + repositoryName reference.Named } var _ distribution.BlobStore = &proxyBlobStore{} @@ -134,7 +135,14 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, if err := pbs.storeLocal(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) } - pbs.scheduler.AddBlob(dgst, repositoryTTL) + + blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return + } + + pbs.scheduler.AddBlob(blobRef, repositoryTTL) }(dgst) _, err = pbs.copyContent(ctx, dgst, w) diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index 978f878e..3054ef0b 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -164,9 +164,10 @@ func makeTestEnv(t *testing.T, name string) *testEnv { s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") proxyBlobStore := proxyBlobStore{ - remoteStore: truthBlobs, - localStore: localBlobs, - scheduler: s, + repositoryName: nameRef, + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, } te := &testEnv{ diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index e0a5ac28..0b5532d4 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -62,11 +62,17 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio return nil, err } - // Schedule the repo for removal - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + // Schedule the manifest blob for removal + repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return nil, err + } +
pms.scheduler.AddManifest(repoBlob, repositoryTTL) // Ensure the manifest blob is cleaned up - pms.scheduler.AddBlob(dgst, repositoryTTL) + //pms.scheduler.AddBlob(blobRef, repositoryTTL) + } return manifest, err diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 5e717bf0..00f9daf9 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -119,6 +119,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE localManifests: localManifests, remoteManifests: truthManifests, scheduler: s, + repositoryName: nameRef, }, } } diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index 1b3fcf32..43c1486e 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -4,6 +4,7 @@ import ( "net/http" "net/url" + "fmt" "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" @@ -35,13 +36,56 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name } v := storage.NewVacuum(ctx, driver) - s := scheduler.New(ctx, driver, "/scheduler-state.json") - s.OnBlobExpire(func(digest string) error { - return v.RemoveBlob(digest) + s.OnBlobExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + blobs := repo.Blobs(ctx) + + // Clear the repository reference and descriptor caches + err = blobs.Delete(ctx, r.Digest()) + if err != nil { + return err + } + + err = v.RemoveBlob(r.Digest().String()) + if err != nil { + return err + } + + return nil }) - s.OnManifestExpire(func(repoName string) error { - return v.RemoveRepository(repoName) + + s.OnManifestExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + manifests, err := repo.Manifests(ctx) + if err != nil { + return err + } + err = manifests.Delete(ctx, r.Digest()) + if err != nil { + return err + } + return nil }) err = s.Start() @@ -97,11 +141,12 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named return &proxiedRepository{ blobStore: &proxyBlobStore{ - localStore: localRepo.Blobs(ctx), - remoteStore: remoteRepo.Blobs(ctx), - scheduler: pr.scheduler, + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + repositoryName: name, }, - manifests: proxyManifestStore{ + manifests: &proxyManifestStore{ repositoryName: name, localManifests: localManifests, // Options? 
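The callbacks above recover a typed reference because the scheduler (reworked later in this patch) persists entries keyed by ref.String() and parses the key back before dispatching. A hedged sketch of that round trip, assuming a reference.Named named and a digest.Digest dgst in scope:

	ref, _ := reference.WithDigest(named, dgst)
	key := ref.String() // e.g. "foo/bar@sha256:..."

	parsed, err := reference.Parse(key)
	if err != nil {
		// corrupt state entry; logged and skipped
	}
	canonical, ok := parsed.(reference.Canonical)
	if !ok {
		// not a name@digest reference; the handlers above reject this case
	}
	_ = canonical.Digest() // what the expiry handlers ultimately act on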
remoteManifests: remoteManifests, @@ -109,7 +154,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named scheduler: pr.scheduler, }, name: name, - tags: proxyTagService{ + tags: &proxyTagService{ localTags: localRepo.Tags(ctx), remoteTags: remoteRepo.Tags(ctx), }, diff --git a/docs/proxy/scheduler/scheduler.go b/docs/proxy/scheduler/scheduler.go index f5334907..0c8a8534 100644 --- a/docs/proxy/scheduler/scheduler.go +++ b/docs/proxy/scheduler/scheduler.go @@ -7,13 +7,12 @@ import ( "time" "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" ) // onTTLExpiryFunc is called when a repository's TTL expires -type expiryFunc func(string) error +type expiryFunc func(reference.Reference) error const ( entryTypeBlob = iota @@ -82,19 +81,20 @@ func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { } // AddBlob schedules a blob cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddBlob(dgst digest.Digest, ttl time.Duration) error { +func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() if ttles.stopped { return fmt.Errorf("scheduler not started") } - ttles.add(dgst.String(), ttl, entryTypeBlob) + + ttles.add(blobRef, ttl, entryTypeBlob) return nil } // AddManifest schedules a manifest cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddManifest(repoName reference.Named, ttl time.Duration) error { +func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() @@ -102,7 +102,7 @@ func (ttles *TTLExpirationScheduler) AddManifest(repoName reference.Named, ttl t return fmt.Errorf("scheduler not started") } - ttles.add(repoName.Name(), ttl, entryTypeManifest) + ttles.add(manifestRef, ttl, entryTypeManifest) return nil } @@ -156,17 +156,17 @@ func (ttles *TTLExpirationScheduler) Start() error { return nil } -func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { +func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { entry := &schedulerEntry{ - Key: key, + Key: r.String(), Expiry: time.Now().Add(ttl), EntryType: eType, } context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) - if oldEntry, present := ttles.entries[key]; present && oldEntry.timer != nil { + if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { oldEntry.timer.Stop() } - ttles.entries[key] = entry + ttles.entries[entry.Key] = entry entry.timer = ttles.startTimer(entry, ttl) ttles.indexDirty = true } @@ -184,13 +184,18 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time. 
case entryTypeManifest: f = ttles.onManifestExpire default: - f = func(repoName string) error { - return fmt.Errorf("Unexpected scheduler entry type") + f = func(reference.Reference) error { + return fmt.Errorf("unexpected scheduler entry type") } } - if err := f(entry.Key); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + ref, err := reference.Parse(entry.Key) + if err == nil { + if err := f(ref); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + } + } else { + context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) } delete(ttles.entries, entry.Key) @@ -249,6 +254,5 @@ func (ttles *TTLExpirationScheduler) readState() error { if err != nil { return err } - return nil } diff --git a/docs/proxy/scheduler/scheduler_test.go b/docs/proxy/scheduler/scheduler_test.go index 00072ed2..d4edd1b1 100644 --- a/docs/proxy/scheduler/scheduler_test.go +++ b/docs/proxy/scheduler/scheduler_test.go @@ -6,28 +6,49 @@ import ( "time" "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver/inmemory" ) +func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { + ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + return ref1, ref2, ref3 +} + func TestSchedule(t *testing.T) { + ref1, ref2, ref3 := testRefs(t) timeUnit := time.Millisecond remainingRepos := map[string]bool{ - "testBlob1": true, - "testBlob2": true, - "ch00": true, + ref1.String(): true, + ref2.String(): true, + ref3.String(): true, } s := New(context.Background(), inmemory.New(), "/ttl") - deleteFunc := func(repoName string) error { + deleteFunc := func(repoName reference.Reference) error { if len(remainingRepos) == 0 { t.Fatalf("Incorrect expiry count") } - _, ok := remainingRepos[repoName] + _, ok := remainingRepos[repoName.String()] if !ok { t.Fatalf("Trying to remove nonexistent repo: %s", repoName) } t.Log("removing", repoName) - delete(remainingRepos, repoName) + delete(remainingRepos, repoName.String()) return nil } @@ -37,11 +58,11 @@ func TestSchedule(t *testing.T) { t.Fatalf("Error starting ttlExpirationScheduler: %s", err) } - s.add("testBlob1", 3*timeUnit, entryTypeBlob) - s.add("testBlob2", 1*timeUnit, entryTypeBlob) + s.add(ref1, 3*timeUnit, entryTypeBlob) + s.add(ref2, 1*timeUnit, entryTypeBlob) func() { - s.add("ch00", 1*timeUnit, entryTypeBlob) + s.add(ref3, 1*timeUnit, entryTypeBlob) }() @@ -53,33 +74,34 @@ func TestSchedule(t *testing.T) { } func TestRestoreOld(t *testing.T) { + ref1, ref2, _ := testRefs(t) remainingRepos := map[string]bool{ - "testBlob1": true, - "oldRepo": true, + ref1.String(): true, + ref2.String(): true, } - deleteFunc := func(repoName string) error { - if repoName == "oldRepo" && len(remainingRepos) == 3 { - t.Errorf("oldRepo should be removed first") + deleteFunc := func(r reference.Reference) error { + if r.String() == ref1.String() &&
len(remainingRepos) == 2 { + t.Errorf("ref1 should be removed first") } - _, ok := remainingRepos[repoName] + _, ok := remainingRepos[r.String()] if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", repoName) + t.Fatalf("Trying to remove nonexistent repo: %s", r) } - delete(remainingRepos, repoName) + delete(remainingRepos, r.String()) return nil } timeUnit := time.Millisecond serialized, err := json.Marshal(&map[string]schedulerEntry{ - "testBlob1": { + ref1.String(): { Expiry: time.Now().Add(1 * timeUnit), - Key: "testBlob1", + Key: ref1.String(), EntryType: 0, }, - "oldRepo": { + ref2.String(): { Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first - Key: "oldRepo", + Key: ref2.String(), EntryType: 0, }, }) @@ -108,13 +130,16 @@ func TestRestoreOld(t *testing.T) { } func TestStopRestore(t *testing.T) { + ref1, ref2, _ := testRefs(t) + timeUnit := time.Millisecond remainingRepos := map[string]bool{ - "testBlob1": true, - "testBlob2": true, + ref1.String(): true, + ref2.String(): true, } - deleteFunc := func(repoName string) error { - delete(remainingRepos, repoName) + + deleteFunc := func(r reference.Reference) error { + delete(remainingRepos, r.String()) return nil } @@ -127,8 +152,8 @@ func TestStopRestore(t *testing.T) { if err != nil { t.Fatalf(err.Error()) } - s.add("testBlob1", 300*timeUnit, entryTypeBlob) - s.add("testBlob2", 100*timeUnit, entryTypeBlob) + s.add(ref1, 300*timeUnit, entryTypeBlob) + s.add(ref2, 100*timeUnit, entryTypeBlob) // Start and stop before all operations complete // state will be written to fs From a58b7625ba24cd6700d36c992fe7edf6981c3895 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 25 Jan 2016 17:51:54 -0800 Subject: [PATCH 386/501] Support range requests in the client's httpReadSeeker Remove buffering on the reader, because it's not useful. Also remove artificial io.EOF return. Signed-off-by: Aaron Lehmann --- docs/client/transport/http_reader.go | 97 +++++++++++++++++++++------- 1 file changed, 72 insertions(+), 25 deletions(-) diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go index b27b6c23..22b0b9d6 100644 --- a/docs/client/transport/http_reader.go +++ b/docs/client/transport/http_reader.go @@ -1,12 +1,22 @@ package transport import ( - "bufio" "errors" "fmt" "io" "net/http" "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. + ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") ) // ReadSeekCloser combines io.ReadSeeker with io.Closer. @@ -40,8 +50,6 @@ type httpReadSeeker struct { // rc is the remote read closer. rc io.ReadCloser - // brd is a buffer for internal buffered io. - brd *bufio.Reader // readerOffset tracks the offset as of the last read. readerOffset int64 // seekOffset allows Seek to override the offset. Seek changes @@ -79,11 +87,6 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { hrs.seekOffset += int64(n) hrs.readerOffset += int64(n) - // Simulate io.EOF error if we reach filesize.
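Two things make this rework safe: a response body of the advertised length already returns io.EOF on its own, so the simulated EOF being stripped here was redundant, and resuming mid-blob now uses a real byte-range request instead of client-side buffering. A hedged sketch of the request/validation dance the new reader performs, assuming a server that honors ranges:

	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Add("Range", fmt.Sprintf("bytes=%d-", offset))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		// transport failure
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusPartialContent {
		// the server ignored the range; mirrors ErrWrongCodeForByteRange
	}
	// e.g. "bytes 100-199/1234": the start must equal the requested offset,
	// and the total (or "*") establishes the reader's size.
	cr := resp.Header.Get("Content-Range")
	_ = cr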
- if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { - err = io.EOF - } - return n, err } @@ -92,8 +95,18 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { return 0, hrs.err } + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request. + hrs.readerOffset = offset + } + _, err := hrs.reader() if err != nil { + hrs.readerOffset = lastReaderOffset return 0, err } @@ -101,14 +114,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { case os.SEEK_CUR: - newOffset += int64(offset) + newOffset += offset case os.SEEK_END: if hrs.size < 0 { return 0, errors.New("content length not known") } - newOffset = hrs.size + int64(offset) + newOffset = hrs.size + offset case os.SEEK_SET: - newOffset = int64(offset) + newOffset = offset } if newOffset < 0 { @@ -131,7 +144,6 @@ func (hrs *httpReadSeeker) Close() error { } hrs.rc = nil - hrs.brd = nil hrs.err = errors.New("httpLayer: closed") @@ -154,7 +166,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.rc != nil { - return hrs.brd, nil + return hrs.rc, nil } req, err := http.NewRequest("GET", hrs.url, nil) @@ -163,10 +175,8 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.readerOffset > 0 { - // TODO(stevvooe): Get this working correctly. - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", "1-") + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) // TODO: get context in here // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } @@ -179,12 +189,55 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // Normally would use client.SuccessStatus, but that would be a cyclic // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - hrs.rc = resp.Body - if resp.StatusCode == http.StatusOK { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { hrs.size = resp.ContentLength } else { 
hrs.size = -1 } + hrs.rc = resp.Body } else { defer resp.Body.Close() if hrs.errorHandler != nil { @@ -193,11 +246,5 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } - if hrs.brd == nil { - hrs.brd = bufio.NewReader(hrs.rc) - } else { - hrs.brd.Reset(hrs.rc) - } - - return hrs.brd, nil + return hrs.rc, nil } From 8e571dff41a6f544d16dd494d2136c8bb97e66f6 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 27 Jan 2016 10:57:58 -0800 Subject: [PATCH 387/501] Add a CheckRedirect function to the HTTP client Use it to preserve Accept and Range headers that were added to the original request. Signed-off-by: Aaron Lehmann --- docs/client/repository.go | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 1f777add..d1bfc180 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -27,6 +27,26 @@ type Registry interface { Repositories(ctx context.Context, repos []string, last string) (n int, err error) } +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName == "Accept" || headerName == "Range" { + for _, val := range headerVals { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { ub, err := v2.NewURLBuilderFromString(baseURL) @@ -35,8 +55,9 @@ func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTrippe } client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, + Transport: transport, + Timeout: 1 * time.Minute, + CheckRedirect: checkHTTPRedirect, } return &registry{ @@ -105,7 +126,8 @@ func NewRepository(ctx context.Context, name reference.Named, baseURL string, tr } client := &http.Client{ - Transport: transport, + Transport: transport, + CheckRedirect: checkHTTPRedirect, // TODO(dmcgowan): create cookie jar } From badd8c49b6e65e8529cc4274105a8ee7be985382 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 28 Jan 2016 17:02:09 -0800 Subject: [PATCH 388/501] Update auth context keys to use constant Prevent using strings throughout the code to reference a string key defined in the auth package. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/auth/auth.go | 14 ++++++++++++-- docs/auth/htpasswd/access_test.go | 2 +- docs/auth/silly/access_test.go | 2 +- docs/auth/token/token_test.go | 2 +- docs/handlers/app.go | 2 +- docs/handlers/context.go | 3 ++- 6 files changed, 18 insertions(+), 7 deletions(-) diff --git a/docs/auth/auth.go b/docs/auth/auth.go index b3bb580d..0ba2eba3 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -39,6 +39,16 @@ import ( "github.com/docker/distribution/context" ) +const ( + // UserKey is used to get the user object from + // a user context + UserKey = "auth.user" + + // UserNameKey is used to get the user name from + // a user context + UserNameKey = "auth.user.name" +) + // UserInfo carries information about // an authenticated/authorized client.
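Exported key constants make the context contract greppable and typo-proof compared to the bare "auth.user" strings they replace. A hedged sketch of a consumer reading the authenticated user back out of a context populated by an access controller:

	func currentUser(ctx context.Context) (auth.UserInfo, bool) {
		user, ok := ctx.Value(auth.UserKey).(auth.UserInfo)
		return user, ok
	}

	// usage: user, ok := currentUser(ctx); the type assertion fails closed
	// when no access controller ran, so callers get a clean boolean.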
type UserInfo struct { @@ -102,9 +112,9 @@ type userInfoContext struct { func (uic userInfoContext) Value(key interface{}) interface{} { switch key { - case "auth.user": + case UserKey: return uic.user - case "auth.user.name": + case UserNameKey: return uic.user.Name } diff --git a/docs/auth/htpasswd/access_test.go b/docs/auth/htpasswd/access_test.go index db040547..553f05cf 100644 --- a/docs/auth/htpasswd/access_test.go +++ b/docs/auth/htpasswd/access_test.go @@ -56,7 +56,7 @@ func TestBasicAccessController(t *testing.T) { } } - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) if !ok { t.Fatal("basic accessController did not set auth.user context") } diff --git a/docs/auth/silly/access_test.go b/docs/auth/silly/access_test.go index ff2155b1..a7c14cb9 100644 --- a/docs/auth/silly/access_test.go +++ b/docs/auth/silly/access_test.go @@ -29,7 +29,7 @@ func TestSillyAccessController(t *testing.T) { } } - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) if !ok { t.Fatal("silly accessController did not set auth.user context") } diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go index 119aa738..6524e166 100644 --- a/docs/auth/token/token_test.go +++ b/docs/auth/token/token_test.go @@ -375,7 +375,7 @@ func TestAccessController(t *testing.T) { t.Fatalf("accessController returned unexpected error: %s", err) } - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) if !ok { t.Fatal("token accessController did not set auth.user context") } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 70b7417f..87c1e05a 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -588,7 +588,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) if app.nameRequired(r) { nameRef, err := reference.ParseNamed(getName(context)) diff --git a/docs/handlers/context.go b/docs/handlers/context.go index 85a17123..552db2df 100644 --- a/docs/handlers/context.go +++ b/docs/handlers/context.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" "golang.org/x/net/context" ) @@ -77,7 +78,7 @@ func getUploadUUID(ctx context.Context) (uuid string) { // getUserName attempts to resolve a username from the context and request. If // a username cannot be resolved, the empty string is returned. func getUserName(ctx context.Context, r *http.Request) string { - username := ctxu.GetStringValue(ctx, "auth.user.name") + username := ctxu.GetStringValue(ctx, auth.UserNameKey) // Fallback to request user with basic auth if username == "" { From f41a408e346c5815f8d8144b9db2d04fa86829ae Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 20 Jan 2016 16:40:58 -0800 Subject: [PATCH 389/501] Adds custom registry User-Agent header to s3 HTTP requests Uses docker/goamz instead of AdRoll/goamz Adds a registry UA string param to the storage parameters when constructing the storage driver for the registry App. 
This could be used by other storage drivers as well Signed-off-by: Brian Bland --- docs/handlers/app.go | 55 +++++++++++-------- .../middleware/cloudfront/middleware.go | 2 +- docs/storage/driver/s3/s3.go | 27 +++++++-- docs/storage/driver/s3/s3_test.go | 3 +- 4 files changed, 58 insertions(+), 29 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 23225493..6dabaca3 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -9,6 +9,7 @@ import ( "net/http" "net/url" "os" + "runtime" "time" log "github.com/Sirupsen/logrus" @@ -30,6 +31,7 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/distribution/version" "github.com/docker/libtrust" "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" @@ -83,12 +85,12 @@ type App struct { // NewApp takes a configuration and returns a configured app, ready to serve // requests. The app only implements ServeHTTP and can be wrapped in other // handlers accordingly. -func NewApp(ctx context.Context, configuration *configuration.Configuration) *App { +func NewApp(ctx context.Context, config *configuration.Configuration) *App { app := &App{ - Config: configuration, + Config: config, Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - isCache: configuration.Proxy.RemoteURL != "", + router: v2.RouterWithPrefix(config.HTTP.Prefix), + isCache: config.Proxy.RemoteURL != "", } // Register the handler dispatchers. @@ -102,8 +104,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + // override the storage driver's UA string for registry outbound HTTP requests + storageParams := config.Storage.Parameters() + if storageParams == nil { + storageParams = make(configuration.Parameters) + } + storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version()) + var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) + app.driver, err = factory.Create(config.Storage.Type(), storageParams) if err != nil { // TODO(stevvooe): Move the creation of a service into a protected // method, where this is created lazily. 
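Once a driver receives the useragent parameter, applying it is a small transport-wrapping exercise. A hedged sketch of the general shape (the type below is illustrative, not the registry's API; the S3 driver later in this patch uses the registry's own transport helpers instead):

	type userAgentTransport struct {
		base http.RoundTripper
		ua   string
	}

	func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
		// Clone before mutating: a RoundTripper must not modify the caller's request.
		r2 := new(http.Request)
		*r2 = *req
		r2.Header = make(http.Header, len(req.Header))
		for k, v := range req.Header {
			r2.Header[k] = v
		}
		r2.Header.Set("User-Agent", t.ua)
		return t.base.RoundTrip(r2)
	}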
Its status can be queried via @@ -112,7 +121,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } purgeConfig := uploadPurgeDefaultConfig() - if mc, ok := configuration.Storage["maintenance"]; ok { + if mc, ok := config.Storage["maintenance"]; ok { if v, ok := mc["uploadpurging"]; ok { purgeConfig, ok = v.(map[interface{}]interface{}) if !ok { @@ -135,15 +144,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) + app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) if err != nil { panic(err) } - app.configureSecret(configuration) - app.configureEvents(configuration) - app.configureRedis(configuration) - app.configureLogHook(configuration) + app.configureSecret(config) + app.configureEvents(config) + app.configureRedis(config) + app.configureLogHook(config) // Generate an ephemeral key to be used for signing converted manifests // for clients that don't support schema2. @@ -152,8 +161,8 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap panic(err) } - if configuration.HTTP.Host != "" { - u, err := url.Parse(configuration.HTTP.Host) + if config.HTTP.Host != "" { + u, err := url.Parse(config.HTTP.Host) if err != nil { panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) } @@ -167,7 +176,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } // configure deletion - if d, ok := configuration.Storage["delete"]; ok { + if d, ok := config.Storage["delete"]; ok { e, ok := d["enabled"] if ok { if deleteEnabled, ok := e.(bool); ok && deleteEnabled { @@ -178,7 +187,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap // configure redirects var redirectDisabled bool - if redirectConfig, ok := configuration.Storage["redirect"]; ok { + if redirectConfig, ok := config.Storage["redirect"]; ok { v := redirectConfig["disable"] switch v := v.(type) { case bool: @@ -194,7 +203,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { + if cc, ok := config.Storage["cache"]; ok { v, ok := cc["blobdescriptor"] if !ok { // Backwards compatible: "layerinfo" == "blobdescriptor" @@ -223,7 +232,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) } } } @@ -236,15 +245,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } } - app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) + app.registry, err = applyRegistryMiddleware(app.Context, app.registry, config.Middleware["registry"]) if err != nil { panic(err) } - authType := configuration.Auth.Type() + authType := config.Auth.Type() if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) + accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) if err != nil { 
panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) } @@ -253,13 +262,13 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } // configure as a pull through cache - if configuration.Proxy.RemoteURL != "" { - app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) + if config.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy) if err != nil { panic(err.Error()) } app.isCache = true - ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) } return app diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go index 31c00afc..56edda3a 100644 --- a/docs/storage/driver/middleware/cloudfront/middleware.go +++ b/docs/storage/driver/middleware/cloudfront/middleware.go @@ -10,10 +10,10 @@ import ( "io/ioutil" "time" - "github.com/AdRoll/goamz/cloudfront" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/goamz/cloudfront" ) // cloudFrontStorageMiddleware provides an simple implementation of layerHandler that diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 7bb23a85..f09e5508 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -26,11 +26,12 @@ import ( "sync" "time" - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" "github.com/Sirupsen/logrus" + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" @@ -58,6 +59,7 @@ type DriverParameters struct { V4Auth bool ChunkSize int64 RootDirectory string + UserAgent string } func init() { @@ -168,7 +170,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case int, uint, int32, uint32, uint64: chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() default: - return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) } if chunkSize < minChunkSize { @@ -181,6 +183,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { rootDirectory = "" } + userAgent, ok := parameters["useragent"] + if !ok { + userAgent = "" + } + params := DriverParameters{ fmt.Sprint(accessKey), fmt.Sprint(secretKey), @@ -191,6 +198,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { v4AuthBool, chunkSize, fmt.Sprint(rootDirectory), + fmt.Sprint(userAgent), } return New(params) @@ -209,7 +217,16 @@ func New(params DriverParameters) (*Driver, error) { } s3obj := s3.New(auth, params.Region) - bucket := s3obj.Bucket(params.Bucket) + + if params.UserAgent != "" { + s3obj.Client = &http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, + transport.NewHeaderRequestModifier(http.Header{ + http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}, + }), + ), + } + } if 
params.V4Auth { s3obj.Signature = aws.V4Signature @@ -219,6 +236,8 @@ func New(params DriverParameters) (*Driver, error) { } } + bucket := s3obj.Bucket(params.Bucket) + // TODO Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new s3driver while another one is running on the same bucket. // multis, _, err := bucket.ListMulti("", "") diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go index 70172a6d..86f433f3 100644 --- a/docs/storage/driver/s3/s3_test.go +++ b/docs/storage/driver/s3/s3_test.go @@ -6,10 +6,10 @@ import ( "strconv" "testing" - "github.com/AdRoll/goamz/aws" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" + "github.com/docker/goamz/aws" "gopkg.in/check.v1" ) @@ -69,6 +69,7 @@ func init() { v4AuthBool, minChunkSize, rootDirectory, + "", } return New(parameters) From 8e7910826e623687194301c79602f875c920c782 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 28 Jan 2016 15:48:49 -0800 Subject: [PATCH 390/501] Adds "storageclass" configuration parameter for S3 driver. Defaults to STANDARD, also supports REDUCED_REDUNDANCY. Signed-off-by: Brian Bland --- docs/storage/driver/s3/s3.go | 26 ++++++++++++++++++++++++-- docs/storage/driver/s3/s3_test.go | 2 ++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index f09e5508..83fd74f7 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -1,7 +1,7 @@ // Package s3 provides a storagedriver.StorageDriver implementation to // store blobs in Amazon S3 cloud storage. // -// This package leverages the AdRoll/goamz client library for interfacing with +// This package leverages the docker/goamz client library for interfacing with // s3. 
// // Because s3 is a key, value store the Stat call does not support last modification @@ -59,6 +59,7 @@ type DriverParameters struct { V4Auth bool ChunkSize int64 RootDirectory string + StorageClass s3.StorageClass UserAgent string } @@ -79,6 +80,7 @@ type driver struct { ChunkSize int64 Encrypt bool RootDirectory string + StorageClass s3.StorageClass pool sync.Pool // pool []byte buffers used for WriteStream zeros []byte // shared, zero-valued buffer used for WriteStream @@ -183,6 +185,21 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { rootDirectory = "" } + storageClass := s3.StandardStorage + storageClassParam, ok := parameters["storageclass"] + if ok { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassCasted := s3.StorageClass(strings.ToUpper(storageClassString)) + if storageClassCasted != s3.StandardStorage && storageClassCasted != s3.ReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) + } + storageClass = storageClassCasted + } + userAgent, ok := parameters["useragent"] if !ok { userAgent = "" @@ -198,6 +215,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { v4AuthBool, chunkSize, fmt.Sprint(rootDirectory), + storageClass, fmt.Sprint(userAgent), } @@ -259,6 +277,7 @@ func New(params DriverParameters) (*Driver, error) { ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, zeros: make([]byte, params.ChunkSize), } @@ -826,7 +845,10 @@ func hasCode(err error, code string) bool { } func (d *driver) getOptions() s3.Options { - return s3.Options{SSE: d.Encrypt} + return s3.Options{ + SSE: d.Encrypt, + StorageClass: d.StorageClass, + } } func getPermissions() s3.ACL { diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go index 86f433f3..be099790 100644 --- a/docs/storage/driver/s3/s3_test.go +++ b/docs/storage/driver/s3/s3_test.go @@ -10,6 +10,7 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" "gopkg.in/check.v1" ) @@ -69,6 +70,7 @@ func init() { v4AuthBool, minChunkSize, rootDirectory, + s3.StandardStorage, "", } From a2ade36ecf84bf5f85902a2584db8bb8dc0f81c0 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 1 Feb 2016 15:34:36 -0800 Subject: [PATCH 391/501] Adds test for S3 storage class configuration option Signed-off-by: Brian Bland --- docs/storage/driver/s3/s3_test.go | 74 ++++++++++++++++++++++++++++--- 1 file changed, 67 insertions(+), 7 deletions(-) diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3/s3_test.go index be099790..660d5350 100644 --- a/docs/storage/driver/s3/s3_test.go +++ b/docs/storage/driver/s3/s3_test.go @@ -18,7 +18,7 @@ import ( // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } -var s3DriverConstructor func(rootDirectory string) (*Driver, error) +var s3DriverConstructor func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) var skipS3 func() string func init() { @@ -35,7 +35,7 @@ func init() { } defer os.Remove(root) - s3DriverConstructor = func(rootDirectory string) (*Driver, error) { + s3DriverConstructor = func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) @@ -70,7 +70,7 @@ func init() { v4AuthBool, minChunkSize, rootDirectory, - s3.StandardStorage, + storageClass, "", } @@ -86,7 +86,7 @@ func init() { } testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(root) + return s3DriverConstructor(root, s3.StandardStorage) }, skipS3) } @@ -101,17 +101,17 @@ func TestEmptyRootList(t *testing.T) { } defer os.Remove(validRoot) - rootedDriver, err := s3DriverConstructor(validRoot) + rootedDriver, err := s3DriverConstructor(validRoot, s3.StandardStorage) if err != nil { t.Fatalf("unexpected error creating rooted driver: %v", err) } - emptyRootDriver, err := s3DriverConstructor("") + emptyRootDriver, err := s3DriverConstructor("", s3.StandardStorage) if err != nil { t.Fatalf("unexpected error creating empty root driver: %v", err) } - slashRootDriver, err := s3DriverConstructor("/") + slashRootDriver, err := s3DriverConstructor("/", s3.StandardStorage) if err != nil { t.Fatalf("unexpected error creating slash root driver: %v", err) } @@ -139,3 +139,63 @@ func TestEmptyRootList(t *testing.T) { } } } + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StandardStorage) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.ReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.Bucket.GetResponse(standardDriverUnwrapped.s3Path(standardFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != "" { + t.Fatalf("unexpected storage class for standard file: %v", storageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.Bucket.GetResponse(rrDriverUnwrapped.s3Path(rrFilename)) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy 
storage file: %v", err) + } + defer resp.Body.Close() + if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { + t.Fatalf("unexpected storage class for standard file: %v", storageClass) + } +} From 95a50c7236f8d6d8a056f14ff098573bf1cb25b6 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 1 Feb 2016 17:03:41 -0800 Subject: [PATCH 392/501] Correct ErrAuthenticationFailure message This was "authentication failured". Change it to "authentication failure". Signed-off-by: Aaron Lehmann --- docs/auth/htpasswd/access.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go index 82d3556d..6e7ba180 100644 --- a/docs/auth/htpasswd/access.go +++ b/docs/auth/htpasswd/access.go @@ -20,7 +20,7 @@ var ( ErrInvalidCredential = errors.New("invalid authorization credential") // ErrAuthenticationFailure returned when authentication failure to be presented to agent. - ErrAuthenticationFailure = errors.New("authentication failured") + ErrAuthenticationFailure = errors.New("authentication failure") ) type accessController struct { From 091c12f86be0b7df6a039ad8cadb2e1909857fdb Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 3 Feb 2016 10:42:32 -0800 Subject: [PATCH 393/501] Print the correct token expiration time Signed-off-by: Richard Scothern --- docs/client/auth/session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index 6b483c62..50a94a3d 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -285,9 +285,9 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon } if tr.ExpiresIn < minimumTokenLifetimeSeconds { - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { From bbf983c06186f244562b9ed39ee26b1dcb7cfcbb Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 3 Feb 2016 13:19:44 -0800 Subject: [PATCH 394/501] On redirect, only copy headers when they don't already exist in the redirected request A changeset under consideration for Go 1.7 would automatically copy headers on redirect. This change future-proofs our code so we won't make duplicate copies of the headers if net/http does it automatically in the future. Signed-off-by: Aaron Lehmann --- docs/client/repository.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 87067b99..b3cae847 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -36,8 +36,21 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error { if len(via) > 0 { for headerName, headerVals := range via[0].Header { - if headerName == "Accept" || headerName == "Range" { - for _, val := range headerVals { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. 
+ hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { req.Header.Add(headerName, val) } } From 95b9c7281b9c067936ae096db191774410585ab1 Mon Sep 17 00:00:00 2001 From: yuzou Date: Thu, 4 Feb 2016 16:14:35 +0800 Subject: [PATCH 395/501] read the actual number of bytes according to the initial size. Signed-off-by: yuzou --- docs/storage/driver/testsuites/testsuites.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 5c34cca6..b178cb3d 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -1184,7 +1184,11 @@ func (rr *randReader) Read(p []byte) (n int, err error) { rr.m.Lock() defer rr.m.Unlock() - n = copy(p, randomContents(int64(len(p)))) + toread := int64(len(p)) + if toread > rr.r { + toread = rr.r + } + n = copy(p, randomContents(toread)) rr.r -= int64(n) if rr.r <= 0 { From 6158eb544d81dbc5ff03343dc6b90d5d516af6da Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 1 Feb 2016 13:47:34 -0800 Subject: [PATCH 396/501] Rename Name method of Repository to Named This makes code that gets the name as a string read like repo.Named().Name() instead of repo.Name().Name(). Requested in https://github.com/docker/docker/pull/19887#discussion_r51479753 Signed-off-by: Aaron Lehmann --- docs/client/repository.go | 4 ++-- docs/handlers/app_test.go | 4 ++-- docs/handlers/blobupload.go | 10 +++++----- docs/handlers/images.go | 6 +++--- docs/handlers/tags.go | 4 ++-- docs/proxy/proxyregistry.go | 2 +- docs/storage/blob_test.go | 2 +- docs/storage/blobwriter.go | 2 +- docs/storage/blobwriter_resumable.go | 4 ++-- docs/storage/linkedblobstore.go | 14 +++++++------- docs/storage/manifeststore.go | 2 +- docs/storage/registry.go | 2 +- docs/storage/signaturestore.go | 2 +- docs/storage/tagstore.go | 14 +++++++------- 14 files changed, 36 insertions(+), 36 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 87067b99..1e8c4fa9 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -146,7 +146,7 @@ type repository struct { name reference.Named } -func (r *repository) Name() reference.Named { +func (r *repository) Named() reference.Named { return r.name } @@ -179,7 +179,7 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService { client: r.client, ub: r.ub, context: r.context, - name: r.Name(), + name: r.Named(), } } diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index 907ae53a..b9e9d312 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -48,8 +48,8 @@ func TestAppDispatcher(t *testing.T) { varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { return func(ctx *Context, r *http.Request) http.Handler { // Always checks the same name context - if ctx.Repository.Name().Name() != getName(ctx) { - t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") + if ctx.Repository.Named().Name() != getName(ctx) { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar") } // Check that we have all that is expected diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index a42e57f6..e2c34d83 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -46,9 +46,9 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } buh.State = 
state - if state.Name != ctx.Repository.Name().Name() { + if state.Name != ctx.Repository.Named().Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) + ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } @@ -312,7 +312,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. } // TODO(stevvooe): Need a better way to manage the upload state automatically. - buh.State.Name = buh.Repository.Name().Name() + buh.State.Name = buh.Repository.Named().Name() buh.State.UUID = buh.Upload.ID() buh.State.Offset = offset buh.State.StartedAt = buh.Upload.StartedAt() @@ -324,7 +324,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. } uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( - buh.Repository.Name(), buh.Upload.ID(), + buh.Repository.Named(), buh.Upload.ID(), url.Values{ "_state": []string{token}, }) @@ -372,7 +372,7 @@ func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string // created blob. A 201 Created is written as well as the canonical URL and // blob digest. func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { - ref, err := reference.WithDigest(buh.Repository.Name(), desc.Digest) + ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest) if err != nil { return err } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 808ead54..b0c8f02e 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -174,10 +174,10 @@ func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2 return nil, err } - ref := imh.Repository.Name() + ref := imh.Repository.Named() if imh.Tag != "" { - ref, err = reference.WithTag(imh.Repository.Name(), imh.Tag) + ref, err = reference.WithTag(ref, imh.Tag) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) return nil, err @@ -289,7 +289,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http } // Construct a canonical url for the uploaded manifest. 
- ref, err := reference.WithDigest(imh.Repository.Name(), imh.Digest) + ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) if err != nil { imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 72c21bbe..fd661e66 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -40,7 +40,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name().Name()})) + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) default: th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } @@ -51,7 +51,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { enc := json.NewEncoder(w) if err := enc.Encode(tagsAPIResponse{ - Name: th.Repository.Name().Name(), + Name: th.Repository.Named().Name(), Tags: tags, }); err != nil { th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index 43c1486e..6ea79ff6 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -179,7 +179,7 @@ func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { return pr.blobStore } -func (pr *proxiedRepository) Name() reference.Named { +func (pr *proxiedRepository) Named() reference.Named { return pr.name } diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 246648b0..4a56784e 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -378,7 +378,7 @@ func TestBlobMount(t *testing.T) { t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) } - canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest) + canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest) if err != nil { t.Fatal(err) } diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index e485cc6d..2406c95a 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -326,7 +326,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // resources are already not present, no error will be returned. func (bw *blobWriter) removeResources(ctx context.Context) error { dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Name().Name(), + name: bw.blobStore.repository.Named().Name(), id: bw.id, }) diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go index fc62bcc4..5ae29c54 100644 --- a/docs/storage/blobwriter_resumable.go +++ b/docs/storage/blobwriter_resumable.go @@ -113,7 +113,7 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. 
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name().String(), + name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, @@ -159,7 +159,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { } uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name().String(), + name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 0c0c622c..963d59d5 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -153,7 +153,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. startedAt := time.Now().UTC() path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name().Name(), + name: lbs.repository.Named().Name(), id: uuid, }) @@ -162,7 +162,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. } startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name().Name(), + name: lbs.repository.Named().Name(), id: uuid, }) @@ -182,7 +182,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name().Name(), + name: lbs.repository.Named().Name(), id: id, }) @@ -206,7 +206,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution } path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name().Name(), + name: lbs.repository.Named().Name(), id: id, }) @@ -298,7 +298,7 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution } seenDigests[dgst] = struct{}{} - blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) if err != nil { return err } @@ -368,7 +368,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { // clear any possible existence of a link described in linkPathFns for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) if err != nil { return err } @@ -391,7 +391,7 @@ func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (er // linkPathFuncs to let us try a few different paths before returning not // found. func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) if err != nil { return "", err } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 33c0c351..e259af48 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -77,7 +77,7 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. 
if err != nil { if err == distribution.ErrBlobUnknown { return nil, distribution.ErrManifestUnknownRevision{ - Name: ms.repository.Name().Name(), + Name: ms.repository.Named().Name(), Revision: dgst, } } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index be570cbc..1870e698 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -134,7 +134,7 @@ type repository struct { } // Name returns the name of the repository. -func (repo *repository) Name() reference.Named { +func (repo *repository) Named() reference.Named { return repo.name } diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go index 205d6009..2940e041 100644 --- a/docs/storage/signaturestore.go +++ b/docs/storage/signaturestore.go @@ -16,7 +16,7 @@ type signatureStore struct { func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Name().Name(), + name: s.repository.Named().Name(), revision: dgst, }) diff --git a/docs/storage/tagstore.go b/docs/storage/tagstore.go index 8381d244..4386ffca 100644 --- a/docs/storage/tagstore.go +++ b/docs/storage/tagstore.go @@ -26,7 +26,7 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { var tags []string pathSpec, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name().Name(), + name: ts.repository.Named().Name(), }) if err != nil { return tags, err @@ -36,7 +36,7 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name().Name()} + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} default: return tags, err } @@ -53,7 +53,7 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { // exists returns true if the specified manifest tag exists in the repository. func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), + name: ts.repository.Named().Name(), tag: tag, }) @@ -73,7 +73,7 @@ func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { // the current tag. The digest must point to a manifest. func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), + name: ts.repository.Named().Name(), tag: tag, }) @@ -95,7 +95,7 @@ func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descr // resolve the current revision for name and tag. 
func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), + name: ts.repository.Named().Name(), tag: tag, }) @@ -119,7 +119,7 @@ func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descripto // Untag removes the tag association func (ts *tagStore) Untag(ctx context.Context, tag string) error { tagPath, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name().Name(), + name: ts.repository.Named().Name(), tag: tag, }) @@ -172,7 +172,7 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([ var tags []string for _, tag := range allTags { tagLinkPathSpec := manifestTagCurrentPathSpec{ - name: ts.repository.Name().Name(), + name: ts.repository.Named().Name(), tag: tag, } From 9894643c885f29c381b97e5f53905db3a8c46202 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Thu, 4 Feb 2016 17:32:55 -0800 Subject: [PATCH 397/501] Correct type for repo reference Signed-off-by: Richard Scothern --- docs/storage/blob_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 701a14ed..1e5b408c 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -22,7 +22,7 @@ import ( // obtained using Seek func TestWriteSeek(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { From c89f5b3775ad54fd93b6398cf6f2aa62970d4c17 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 18 Jan 2016 10:19:36 -0800 Subject: [PATCH 398/501] Add information about manifest content types to API spec Bring the spec up to date for schema2 changes. Signed-off-by: Aaron Lehmann --- docs/api/v2/descriptors.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index ad3da3ef..db52ba2e 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -514,7 +514,7 @@ var routeDescriptors = []RouteDescriptor{ digestHeader, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, }, @@ -553,7 +553,7 @@ var routeDescriptors = []RouteDescriptor{ referenceParameterDescriptor, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, Successes: []ResponseDescriptor{ From 4bb5f808857ad85065036e42d3a808f741f16970 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 2 Feb 2016 19:30:48 -0800 Subject: [PATCH 399/501] Improves flexibility of configuration handling for S3 driver Treats nil parameters the same as unprovided parameters (fixes issues where certain parameters are printed to ""). Accepts "true" and "false" string values for boolean parameters. 
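For illustration, every boolean parameter now goes through the same
type-switch coercion. A minimal sketch of the pattern (the helper name
parseBoolParameter is hypothetical and not part of this change; the
true/false/nil handling mirrors the s3.go hunk below, and "strconv" and
"fmt" are assumed to be imported):

    // parseBoolParameter accepts native bools, the strings
    // "true"/"false", and nil, which falls back to the supplied
    // default just as an unprovided parameter does.
    func parseBoolParameter(v interface{}, def bool, name string) (bool, error) {
        switch v := v.(type) {
        case string:
            b, err := strconv.ParseBool(v)
            if err != nil {
                return false, fmt.Errorf("The %s parameter should be a boolean", name)
            }
            return b, nil
        case bool:
            return v, nil
        case nil:
            // nil is treated the same as an unprovided parameter
            return def, nil
        default:
            return false, fmt.Errorf("The %s parameter should be a boolean", name)
        }
    }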
Signed-off-by: Brian Bland --- docs/storage/driver/s3/s3.go | 115 +++++++++++++++++++++-------------- 1 file changed, 70 insertions(+), 45 deletions(-) diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3/s3.go index 83fd74f7..a1f4c57d 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3/s3.go @@ -107,17 +107,18 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // Providing no values for these is valid in case the user is authenticating // with an IAM on an ec2 instance (in which case the instance credentials will // be summoned when GetAuth is called) - accessKey, ok := parameters["accesskey"] - if !ok { + accessKey := parameters["accesskey"] + if accessKey == nil { accessKey = "" } - secretKey, ok := parameters["secretkey"] - if !ok { + + secretKey := parameters["secretkey"] + if secretKey == nil { secretKey = "" } - regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { + regionName := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { return nil, fmt.Errorf("No region parameter provided") } region := aws.GetRegion(fmt.Sprint(regionName)) @@ -125,69 +126,93 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("Invalid region provided: %v", region) } - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { return nil, fmt.Errorf("The encrypt parameter should be a boolean") } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") } secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { return nil, fmt.Errorf("The secure parameter should be a boolean") } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") } v4AuthBool := false - v4Auth, ok := parameters["v4auth"] - if ok { - v4AuthBool, ok = v4Auth.(bool) - if !ok { + v4Auth := parameters["v4auth"] + switch v4Auth := v4Auth.(type) { + case string: + b, err := strconv.ParseBool(v4Auth) + if err != nil { return nil, fmt.Errorf("The v4auth parameter should be a boolean") } + v4AuthBool = b + case bool: + v4AuthBool = v4Auth + case nil: + // do nothing + default: + return nil, fmt.Errorf("The v4auth parameter should be a boolean") } chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - default: - return nil, fmt.Errorf("invalid value for chunksize: %#v", 
chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + chunkSizeParam := parameters["chunksize"] + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) } - rootDirectory, ok := parameters["rootdirectory"] - if !ok { + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { rootDirectory = "" } storageClass := s3.StandardStorage - storageClassParam, ok := parameters["storageclass"] - if ok { + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { storageClassString, ok := storageClassParam.(string) if !ok { return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) @@ -200,8 +225,8 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { storageClass = storageClassCasted } - userAgent, ok := parameters["useragent"] - if !ok { + userAgent := parameters["useragent"] + if userAgent == nil { userAgent = "" } From ae59517936a34586c6d244e272a82288f6511d6d Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 9 Feb 2016 18:28:43 -0800 Subject: [PATCH 400/501] Fix schema1 manifest etag and docker content digest header When schema2 manifests are rewritten as schema1 currently the etag and docker content digest header keep the value for the schema2 manifest. 
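The heart of the fix, sketched from the images.go hunk below: once the
manifest has been rewritten as schema1, the handler derives the digest
from the converted manifest's canonical bytes instead of reusing the
schema2 digest, so the ETag and Docker-Content-Digest headers describe
the payload that is actually returned.

    // Recompute the response digest from the schema1 canonical
    // payload rather than keeping the schema2 manifest digest.
    signedManifest := manifest.(*schema1.SignedManifest)
    imh.Digest = digest.FromBytes(signedManifest.Canonical)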
Fixes #1444 Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/handlers/api_test.go | 62 +++++++++++++++++++++++++-------------- docs/handlers/images.go | 1 + 2 files changed, 41 insertions(+), 22 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 5fffaa5a..1f18173f 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1378,19 +1378,28 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name } defer resp.Body.Close() - checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedSchema1Manifest schema1.SignedManifest - dec = json.NewDecoder(resp.Body) - - if err := dec.Decode(&fetchedSchema1Manifest); err != nil { - t.Fatalf("error decoding fetched schema1 manifest: %v", err) + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) } + checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { t.Fatal("wrong schema version") } @@ -1603,19 +1612,28 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) } defer resp.Body.Close() - checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, - }) - - var fetchedSchema1Manifest schema1.SignedManifest - dec = json.NewDecoder(resp.Body) - - if err := dec.Decode(&fetchedSchema1Manifest); err != nil { - t.Fatalf("error decoding fetched schema1 manifest: %v", err) + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) } + checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { t.Fatal("wrong schema version") } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 808ead54..b41037ba 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -196,6 +196,7 @@ func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2 imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } + imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical) return manifest, nil } From 956ece5c70133efd9c39eda72978c15ad83394ed Mon Sep 17 00:00:00 2001 From: Derek 
McGowan Date: Wed, 10 Feb 2016 15:20:39 -0800 Subject: [PATCH 401/501] Add option to disable signatures Add option for specifying trust key for signing schema1 manifests. Since schema1 signature key identifiers are not verified anywhere and deprecated, storing signatures is no longer a requirement. Furthermore in schema2 there is no signature, requiring the registry to already add signatures to generated schema1 manifests. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/handlers/app.go | 22 +++++++--- docs/storage/manifeststore_test.go | 61 ++++++++++++++++++++------- docs/storage/registry.go | 26 +++++++++++- docs/storage/signedmanifesthandler.go | 39 ++++++++++++----- 4 files changed, 115 insertions(+), 33 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index ed925a45..370f63ef 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -155,11 +155,18 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.configureRedis(config) app.configureLogHook(config) - // Generate an ephemeral key to be used for signing converted manifests - // for clients that don't support schema2. - app.trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) + if config.Compatibility.Schema1.TrustKey != "" { + app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) + if err != nil { + panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err)) + } + } else { + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. + app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } } if config.HTTP.Host != "" { @@ -176,6 +183,11 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { options = append(options, storage.DisableDigestResumption) } + if config.Compatibility.Schema1.DisableSignatureStore { + options = append(options, storage.DisableSchema1Signatures) + options = append(options, storage.Schema1SigningKey(app.trustKey)) + } + // configure deletion if d, ok := config.Storage["delete"]; ok { e, ok := d["enabled"] diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index 7885c466..fcb5adf9 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -28,11 +28,10 @@ type manifestStoreTestEnv struct { tag string } -func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *manifestStoreTestEnv { +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, options ...RegistryOption) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider( - memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + registry, err := NewRegistry(ctx, driver, options...) 
if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -53,13 +52,26 @@ func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *ma } func TestManifestStorage(t *testing.T) { + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) +} + +func TestManifestStorageDisabledSignatures(t *testing.T) { + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, DisableSchema1Signatures, Schema1SigningKey(k)) +} + +func testManifestStorage(t *testing.T, options ...RegistryOption) { repoName, _ := reference.ParseNamed("foo/bar") - env := newManifestStoreTestEnv(t, repoName, "thetag") + env := newManifestStoreTestEnv(t, repoName, "thetag", options...) ctx := context.Background() ms, err := env.repository.Manifests(ctx) if err != nil { t.Fatal(err) } + equalSignatures := env.registry.(*registry).schema1SignaturesEnabled m := schema1.Manifest{ Versioned: manifest.Versioned{ @@ -159,8 +171,14 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected manifest type from signedstore") } - if !reflect.DeepEqual(fetchedManifest, sm) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) + if !bytes.Equal(fetchedManifest.Canonical, sm.Canonical) { + t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) + } + + if equalSignatures { + if !reflect.DeepEqual(fetchedManifest, sm) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest.Manifest, sm.Manifest) + } } _, pl, err := fetchedManifest.Payload() @@ -196,8 +214,19 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error fetching manifest by digest: %v", err) } - if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) + byDigestManifest, ok := fetchedByDigest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(byDigestManifest.Canonical, fetchedManifest.Canonical) { + t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) + } + + if equalSignatures { + if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) + } } sigs, err := fetchedJWS.Signatures() @@ -286,14 +315,16 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("payloads are not equal") } - receivedSigs, err := receivedJWS.Signatures() - if err != nil { - t.Fatalf("error getting signatures: %v", err) - } + if equalSignatures { + receivedSigs, err := receivedJWS.Signatures() + if err != nil { + t.Fatalf("error getting signatures: %v", err) + } - for i, sig := range receivedSigs { - if !bytes.Equal(sig, expectedSigs[i]) { - t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) + for i, sig := range receivedSigs { + if !bytes.Equal(sig, expectedSigs[i]) { + t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) + } } } diff --git a/docs/storage/registry.go b/docs/storage/registry.go index be570cbc..26fadf02 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -6,6 +6,7 @@ import ( "github.com/docker/distribution/reference" 
"github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libtrust" ) // registry is the top-level implementation of Registry for use in the storage @@ -17,6 +18,8 @@ type registry struct { blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool resumableDigestEnabled bool + schema1SignaturesEnabled bool + schema1SigningKey libtrust.PrivateKey } // RegistryOption is the type used for functional options for NewRegistry. @@ -43,6 +46,24 @@ func DisableDigestResumption(registry *registry) error { return nil } +// DisableSchema1Signatures is a functional option for NewRegistry. It disables +// signature storage and ensures all schema1 manifests will only be returned +// with a signature from a provided signing key. +func DisableSchema1Signatures(registry *registry) error { + registry.schema1SignaturesEnabled = false + return nil +} + +// Schema1SigningKey returns a functional option for NewRegistry. It sets the +// signing key for adding a signature to all schema1 manifests. This should be +// used in conjunction with disabling signature store. +func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { + return func(registry *registry) error { + registry.schema1SigningKey = key + return nil + } +} + // BlobDescriptorCacheProvider returns a functional option for // NewRegistry. It creates a cached blob statter for use by the // registry. @@ -85,8 +106,9 @@ func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, option statter: statter, pathFn: bs.path, }, - statter: statter, - resumableDigestEnabled: true, + statter: statter, + resumableDigestEnabled: true, + schema1SignaturesEnabled: true, } for _, option := range options { diff --git a/docs/storage/signedmanifesthandler.go b/docs/storage/signedmanifesthandler.go index 02663226..8e13dd93 100644 --- a/docs/storage/signedmanifesthandler.go +++ b/docs/storage/signedmanifesthandler.go @@ -25,10 +25,17 @@ var _ ManifestHandler = &signedManifestHandler{} func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") - // Fetch the signatures for the manifest - signatures, err := ms.signatures.Get(dgst) - if err != nil { - return nil, err + + var ( + signatures [][]byte + err error + ) + if ms.repository.schema1SignaturesEnabled { + // Fetch the signatures for the manifest + signatures, err = ms.signatures.Get(dgst) + if err != nil { + return nil, err + } } jsig, err := libtrust.NewJSONSignature(content, signatures...) @@ -36,6 +43,14 @@ func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dige return nil, err } + if ms.repository.schema1SigningKey != nil { + if err := jsig.Sign(ms.repository.schema1SigningKey); err != nil { + return nil, err + } + } else if !ms.repository.schema1SignaturesEnabled { + return nil, fmt.Errorf("missing signing key with signature store disabled") + } + // Extract the pretty JWS raw, err := jsig.PrettySignature("signatures") if err != nil { @@ -75,14 +90,16 @@ func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution. return "", err } - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return "", err - } + if ms.repository.schema1SignaturesEnabled { + // Grab each json signature and store them. 
+ signatures, err := sm.Signatures() + if err != nil { + return "", err + } - if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { - return "", err + if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { + return "", err + } } return revision.Digest, nil From f77c82ebb36276ca350cb1592169b2dd1ceea589 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 10 Feb 2016 16:26:29 -0800 Subject: [PATCH 402/501] Typo fixes in comments Correct spelling of words in source code comments. Signed-off-by: Aaron Lehmann --- docs/api/v2/descriptors.go | 6 +++--- docs/api/v2/errors.go | 2 +- docs/handlers/api_test.go | 6 +++--- docs/handlers/app.go | 2 +- docs/handlers/helpers.go | 2 +- docs/storage/driver/gcs/gcs.go | 2 +- docs/storage/driver/storagedriver.go | 2 +- docs/storage/driver/testsuites/testsuites.go | 2 +- docs/storage/paths.go | 6 +++--- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index ad3da3ef..7549ccc3 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -271,7 +271,7 @@ type MethodDescriptor struct { // RequestDescriptor per API use case. type RequestDescriptor struct { // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particalar request. + // to provide quick context for the particular request. Name string // Description should cover the requests purpose, covering any details for @@ -303,14 +303,14 @@ type RequestDescriptor struct { // ResponseDescriptor describes the components of an API response. type ResponseDescriptor struct { // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particalar response. + // to provide quick context for the particular response. Name string // Description should provide a brief overview of the role of the // response. Description string - // StatusCode specifies the status recieved by this particular response. + // StatusCode specifies the status received by this particular response. StatusCode int // Headers covers any headers that may be returned from the response. diff --git a/docs/api/v2/errors.go b/docs/api/v2/errors.go index ece52a2c..97d6923a 100644 --- a/docs/api/v2/errors.go +++ b/docs/api/v2/errors.go @@ -84,7 +84,7 @@ var ( }) // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verfication. + // signature verification. 
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 5fffaa5a..ef0425aa 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1182,7 +1182,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name manifest.Config.Digest = sampleConfigDigest manifest.Config.Size = int64(len(sampleConfig)) - // The manifest should still be invalid, because its layer doesnt exist + // The manifest should still be invalid, because its layer doesn't exist resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) defer resp.Body.Close() checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) @@ -1415,7 +1415,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name t.Fatal("wrong number of History entries") } - // Don't check V1Compatibility fields becuase we're using randomly-generated + // Don't check V1Compatibility fields because we're using randomly-generated // layers. return args @@ -1641,7 +1641,7 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) t.Fatal("wrong number of History entries") } - // Don't check V1Compatibility fields becuase we're using randomly-generated + // Don't check V1Compatibility fields because we're using randomly-generated // layers. } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 370f63ef..2a60001f 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -405,7 +405,7 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { sinks = append(sinks, endpoint) } - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as + // NOTE(stevvooe): Moving to a new queuing implementation is as easy as // replacing broadcaster with a rabbitmq implementation. It's recommended // that the registry instances also act as the workers to keep deployment // simple. diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index 5a3c9984..fe44f557 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -35,7 +35,7 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr // Read in the data, if any. copied, err := io.Copy(destWriter, r.Body) if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { - // Didn't recieve as much content as expected. Did the client + // Didn't receive as much content as expected. Did the client // disconnect during the request? If so, avoid returning a 400 // error to keep the logs cleaner. 
select { diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 0e3480f2..9de432b4 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -285,7 +285,7 @@ func (d *driver) WriteStream(context ctx.Context, path string, offset int64, rea if err != nil { return nn, err } - // wc was closed succesfully, so the temporary part exists, schedule it for deletion at the end + // wc was closed successfully, so the temporary part exists, schedule it for deletion at the end // of the function defer storageDeleteObject(gcsContext, d.bucket, partName) diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index dc8bdc8d..d5e6fe9f 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -133,7 +133,7 @@ func (err InvalidOffsetError) Error() string { } // Error is a catch-all error type which captures an error string and -// the driver type on which it occured. +// the driver type on which it occurred. type Error struct { DriverName string Enclosed error diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index b178cb3d..43d0811d 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -158,7 +158,7 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) { for _, filename := range invalidFiles { err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - // only delete if file was succesfully written + // only delete if file was successfully written if err == nil { defer suite.deletePath(c, firstPart(filename)) } diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 4d2d48c1..6ee54127 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -48,7 +48,7 @@ const ( // The storage backend layout is broken up into a content-addressable blob // store and repositories. The content-addressable blob store holds most data // throughout the backend, keyed by algorithm and digests of the underlying -// content. Access to the blob store is controled through links from the +// content. Access to the blob store is controlled through links from the // repository to blobstore. // // A repository is made up of layers, manifests and tags. The layers component @@ -301,7 +301,7 @@ type manifestRevisionLinkPathSpec struct { func (manifestRevisionLinkPathSpec) pathSpec() {} -// manifestSignaturesPathSpec decribes the path components for the directory +// manifestSignaturesPathSpec describes the path components for the directory // containing all the signatures for the target blob. Entries are named with // the underlying key id. type manifestSignaturesPathSpec struct { @@ -311,7 +311,7 @@ type manifestSignaturesPathSpec struct { func (manifestSignaturesPathSpec) pathSpec() {} -// manifestSignatureLinkPathSpec decribes the path components used to look up +// manifestSignatureLinkPathSpec describes the path components used to look up // a signature file by the hash of its blob. type manifestSignatureLinkPathSpec struct { name string From cffb4bbbfd9bb31323fcadafc3f6f2120d74f769 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 10 Feb 2016 16:34:50 -0800 Subject: [PATCH 403/501] Export "no basic auth credentials" as an error value Making this an exported error value will allow users of the registry/client/auth module to have consistent behavior between authentication failures and cases where no credentials are provided. 
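With the sentinel exported, a caller can branch on it directly. A
minimal usage sketch (the handler, req, and params variables are
assumed to be in scope; the import path matches this module):

    import "github.com/docker/distribution/registry/client/auth"

    if err := handler.AuthorizeRequest(req, params); err != nil {
        if err == auth.ErrNoBasicAuthCredentials {
            // No credentials were supplied at all; prompt the user
            // instead of reporting a server-side rejection.
        }
        return err
    }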
Signed-off-by: Aaron Lehmann
---
 docs/client/auth/session.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go
index 50a94a3d..f4c7ade4 100644
--- a/docs/client/auth/session.go
+++ b/docs/client/auth/session.go
@@ -15,6 +15,10 @@ import (
 	"github.com/docker/distribution/registry/client/transport"
 )
 
+// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
+// basic auth due to lack of credentials.
+var ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
+
 // AuthenticationHandler is an interface for authorizing a request from
 // params from a "WWW-Authenticate" header for a single scheme.
 type AuthenticationHandler interface {
@@ -322,5 +326,5 @@ func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]st
 			return nil
 		}
 	}
-	return errors.New("no basic auth credentials")
+	return ErrNoBasicAuthCredentials
 }

From d5a38e4c5f23e794e30b986cbb3b13b8d0bf5b87 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Thu, 21 Jan 2016 18:17:53 -0800
Subject: [PATCH 404/501] Adds new s3 driver using aws-sdk-go instead of goamz

Keeps old s3 driver, renames to s3goamz, registers new s3 driver as both
"s3" and "s3aws"

Changes cloudfront middleware to use aws-sdk-go

Signed-off-by: Brian Bland
---
 .../middleware/cloudfront/middleware.go    |  41 +-
 docs/storage/driver/s3-aws/s3.go           | 966 ++++++++++++++++++
 docs/storage/driver/s3-aws/s3_test.go      | 201 ++++
 docs/storage/driver/{s3 => s3-goamz}/s3.go |  14 +-
 .../driver/{s3 => s3-goamz}/s3_test.go     |   4 +-
 5 files changed, 1204 insertions(+), 22 deletions(-)
 create mode 100644 docs/storage/driver/s3-aws/s3.go
 create mode 100644 docs/storage/driver/s3-aws/s3_test.go
 rename docs/storage/driver/{s3 => s3-goamz}/s3.go (98%)
 rename docs/storage/driver/{s3 => s3-goamz}/s3_test.go (98%)

diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go
index 56edda3a..9162c09d 100644
--- a/docs/storage/driver/middleware/cloudfront/middleware.go
+++ b/docs/storage/driver/middleware/cloudfront/middleware.go
@@ -8,12 +8,14 @@ import (
 	"encoding/pem"
 	"fmt"
 	"io/ioutil"
+	"net/url"
+	"strings"
 	"time"
 
+	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
 	"github.com/docker/distribution/context"
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
 	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
-	"github.com/docker/goamz/cloudfront"
 )
 
 // cloudFrontStorageMiddleware provides a simple implementation of layerHandler that
 // constructs temporary signed CloudFront URLs from the storagedriver layer URL,
 // then issues HTTP Temporary Redirects to this CloudFront content URL.
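 //
 // Editorial sketch (not part of the original patch): the aws-sdk-go signer
 // introduced here replaces the goamz cloudfront client roughly as
 //
 //	signer := sign.NewURLSigner(keypairID, privateKey)
 //	signedURL, err := signer.Sign(baseURL+key, time.Now().Add(duration))
 //
 // where keypairID, privateKey, baseURL, and duration are the values parsed
 // from the middleware options in newCloudFrontStorageMiddleware below, and
 // key is a hypothetical S3 bucket key.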
type cloudFrontStorageMiddleware struct { storagedriver.StorageDriver - cloudfront *cloudfront.CloudFront - duration time.Duration + urlSigner *sign.URLSigner + baseURL string + duration time.Duration } var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} @@ -33,15 +36,24 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { base, ok := options["baseurl"] if !ok { - return nil, fmt.Errorf("No baseurl provided") + return nil, fmt.Errorf("no baseurl provided") } baseURL, ok := base.(string) if !ok { return nil, fmt.Errorf("baseurl must be a string") } + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + if _, err := url.Parse(baseURL); err != nil { + return nil, fmt.Errorf("invalid baseurl: %v", err) + } pk, ok := options["privatekey"] if !ok { - return nil, fmt.Errorf("No privatekey provided") + return nil, fmt.Errorf("no privatekey provided") } pkPath, ok := pk.(string) if !ok { @@ -49,7 +61,7 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o } kpid, ok := options["keypairid"] if !ok { - return nil, fmt.Errorf("No keypairid provided") + return nil, fmt.Errorf("no keypairid provided") } keypairID, ok := kpid.(string) if !ok { @@ -58,19 +70,19 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o pkBytes, err := ioutil.ReadFile(pkPath) if err != nil { - return nil, fmt.Errorf("Failed to read privatekey file: %s", err) + return nil, fmt.Errorf("failed to read privatekey file: %s", err) } block, _ := pem.Decode([]byte(pkBytes)) if block == nil { - return nil, fmt.Errorf("Failed to decode private key as an rsa private key") + return nil, fmt.Errorf("failed to decode private key as an rsa private key") } privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return nil, err } - cf := cloudfront.New(baseURL, privateKey, keypairID) + urlSigner := sign.NewURLSigner(keypairID, privateKey) duration := 20 * time.Minute d, ok := options["duration"] @@ -81,13 +93,18 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o case string: dur, err := time.ParseDuration(d) if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) + return nil, fmt.Errorf("invalid duration: %s", err) } duration = dur } } - return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil + return &cloudFrontStorageMiddleware{ + StorageDriver: storageDriver, + urlSigner: urlSigner, + baseURL: baseURL, + duration: duration, + }, nil } // S3BucketKeyer is any type that is capable of returning the S3 bucket key @@ -106,7 +123,7 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, return lh.StorageDriver.URLFor(ctx, path, options) } - cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) + cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration)) if err != nil { return "", err } diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go new file mode 100644 index 00000000..af62d3f0 --- /dev/null +++ b/docs/storage/driver/s3-aws/s3.go @@ -0,0 +1,966 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. 
+// +// This package leverages the official aws client library for interfacing with +// S3. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. +package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3aws" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +// validRegions maps known s3 region identifiers to region descriptors +var validRegions = map[string]struct{}{} + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region string + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string + StorageClass string + UserAgent string +} + +func init() { + for _, region := range []string{ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + } { + validRegions[region] = struct{}{} + } + + // Register this as the default s3 driver in addition to s3aws + factory.Register("s3", &s3DriverFactory{}) + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket string + ChunkSize int64 + Encrypt bool + RootDirectory string + StorageClass string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
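+//
+// Editorial sketch (not part of the original patch): the driver is typically
+// constructed from a configuration map; the bucket name here is
+// hypothetical:
+//
+//	driver, err := FromParameters(map[string]interface{}{
+//		"region":       "us-east-1",
+//		"bucket":       "my-registry-bucket",
+//		"storageclass": "STANDARD",
+//	})
+//
+// accesskey and secretkey may be omitted when IAM instance credentials are
+// available, as described in FromParameters below.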
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - accesskey
+// - secretkey
+// - region
+// - bucket
+// - encrypt
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	// Providing no values for these is valid in case the user is authenticating
+	// with an IAM on an ec2 instance (in which case the instance credentials will
+	// be summoned when GetAuth is called)
+	accessKey, ok := parameters["accesskey"]
+	if !ok {
+		accessKey = ""
+	}
+	secretKey, ok := parameters["secretkey"]
+	if !ok {
+		secretKey = ""
+	}
+
+	regionName, ok := parameters["region"]
+	if !ok || fmt.Sprint(regionName) == "" {
+		return nil, fmt.Errorf("No region parameter provided")
+	}
+	region := fmt.Sprint(regionName)
+	_, ok = validRegions[region]
+	if !ok {
+		return nil, fmt.Errorf("Invalid region provided: %v", region)
+	}
+
+	bucket, ok := parameters["bucket"]
+	if !ok || fmt.Sprint(bucket) == "" {
+		return nil, fmt.Errorf("No bucket parameter provided")
+	}
+
+	encryptBool := false
+	encrypt, ok := parameters["encrypt"]
+	if ok {
+		encryptBool, ok = encrypt.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
+		}
+	}
+
+	secureBool := true
+	secure, ok := parameters["secure"]
+	if ok {
+		secureBool, ok = secure.(bool)
+		if !ok {
+			return nil, fmt.Errorf("The secure parameter should be a boolean")
+		}
+	}
+
+	chunkSize := int64(defaultChunkSize)
+	chunkSizeParam, ok := parameters["chunksize"]
+	if ok {
+		switch v := chunkSizeParam.(type) {
+		case string:
+			vv, err := strconv.ParseInt(v, 0, 64)
+			if err != nil {
+				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+			}
+			chunkSize = vv
+		case int64:
+			chunkSize = v
+		case int, uint, int32, uint32, uint64:
+			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+		default:
+			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+		}
+
+		if chunkSize < minChunkSize {
+			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+		}
+	}
+
+	rootDirectory, ok := parameters["rootdirectory"]
+	if !ok {
+		rootDirectory = ""
+	}
+
+	storageClass := s3.StorageClassStandard
+	storageClassParam, ok := parameters["storageclass"]
+	if ok {
+		storageClassString, ok := storageClassParam.(string)
+		if !ok {
+			return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
+		}
+		// All valid storage class parameters are UPPERCASE, so be a bit more flexible here
+		storageClassString = strings.ToUpper(storageClassString)
+		if storageClassString != s3.StorageClassStandard && storageClassString != s3.StorageClassReducedRedundancy {
+			return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
+		}
+		storageClass = storageClassString
+	}
+
+	userAgent, ok := parameters["useragent"]
+	if !ok {
+		userAgent = ""
+	}
+
+	params := DriverParameters{
+		fmt.Sprint(accessKey),
+		fmt.Sprint(secretKey),
+		fmt.Sprint(bucket),
+		region,
+		encryptBool,
+		secureBool,
+		chunkSize,
+		fmt.Sprint(rootDirectory),
+		storageClass,
+		fmt.Sprint(userAgent),
+	}
+
+	return New(params)
+}
+
+// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
+// bucketName
+func
New(params DriverParameters) (*Driver, error) { + awsConfig := aws.NewConfig() + creds := credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + // awsConfig.WithMaxRetries(10) + + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } + + s3obj := s3.New(session.New(awsConfig)) + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + return ioutil.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + _, err := d.S3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }) + return parseError(path, err) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + resp, err := d.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), + }) + + if err != nil { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. 
May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + var partNumber int64 = 1 + bytesRead := 0 + var putErrChan chan error + parts := []*s3.CompletedPart{} + done := make(chan struct{}) // stopgap to free up waiting goroutines + + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return 0, err + } + + uploadID := resp.UploadId + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. + defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + _, err := d.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + UploadId: uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: parts, + }, + }) + if err != nil { + // TODO (brianbland): log errors here + d.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + UploadId: uploadID, + }) + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the s3 + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying s3 library should handle it, it doesn't seem to + // be part of the shouldRetry function (see AdRoll/goamz/s3). 
+ defer func() { + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } + }() + + if bytesRead <= 0 { + return + } + + resp, err := d.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + Body: bytes.NewReader(buf[0 : int64(bytesRead)+from]), + }) + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. + parts = append(parts, &s3.CompletedPart{ + ETag: resp.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.S3.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + if err != nil { + if s3Err, ok := err.(awserr.Error); !ok || s3Err.Code() != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil && resp.ContentLength != nil { + currentLength = *resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + resp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + CopySource: aws.String(d.Bucket + "/" + d.s3Path(path)), + CopySourceRange: aws.String("bytes=0-" + strconv.FormatInt(offset-1, 10)), + }) + if err != nil { + return 0, err + } + + parts = append(parts, &s3.CompletedPart{ + ETag: resp.CopyPartResult.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + resp, err := d.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + Body: bytes.NewReader(d.zeros), + }) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, &s3.CompletedPart{ + ETag: resp.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if 
err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + resp, err := d.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + Body: bytes.NewReader(buf), + }) + if err != nil { + return totalRead, err + } + + parts = append(parts, &s3.CompletedPart{ + ETag: resp.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + resp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + CopySource: aws.String(d.Bucket + "/" + d.s3Path(path)), + }) + if err != nil { + return 0, err + } + + parts = append(parts, &s3.CompletedPart{ + ETag: resp.CopyPartResult.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(resp.Contents) == 1 { + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + } else if len(resp.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
+ // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + }) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range resp.Contents { + files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range resp.CommonPrefixes { + commonPrefix := *commonPrefix.Prefix + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if *resp.IsTruncated { + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + Marker: resp.NextMarker, + }) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.S3.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + }) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + }) + if err != nil || len(resp.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + + for len(resp.Contents) > 0 { + for _, key := range resp.Contents { + s3Objects = append(s3Objects, &s3.ObjectIdentifier{ + Key: key.Key, + }) + } + + _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(d.Bucket), + Delete: &s3.Delete{ + Objects: s3Objects, + Quiet: aws.Bool(false), + }, + }) + if err != nil { + return nil + } + + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + }) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
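+//
+// Editorial sketch (not part of the original patch): a caller can request a
+// pre-signed GET URL for a hypothetical path that expires in one hour:
+//
+//	signedURL, err := d.URLFor(ctx, "/some/path", map[string]interface{}{
+//		"method": "GET",
+//		"expiry": time.Now().Add(time.Hour),
+//	})
+//
+// As implemented below, "method" defaults to GET and "expiry" to twenty
+// minutes from now.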
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresIn := 20 * time.Minute + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresIn = et.Sub(time.Now()) + } + } + + var req *request.Request + + switch methodString { + case "GET": + req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + case "HEAD": + req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + default: + panic("unreachable") + } + + return req.Presign(expiresIn) +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if d.Encrypt { + return aws.String("AES256") + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String("private") +} + +func (d *driver) getStorageClass() *string { + return aws.String(d.StorageClass) +} + +// getbuf returns a buffer from the driver's pool with length d.ChunkSize. +func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} diff --git a/docs/storage/driver/s3-aws/s3_test.go b/docs/storage/driver/s3-aws/s3_test.go new file mode 100644 index 00000000..6816e650 --- /dev/null +++ b/docs/storage/driver/s3-aws/s3_test.go @@ -0,0 +1,201 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
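+//
+// Editorial sketch (not part of the original patch): the suite only runs
+// when the S3 parameters are present in the environment; a hypothetical
+// invocation with made-up values might look like
+//
+//	AWS_ACCESS_KEY=AKIAEXAMPLE AWS_SECRET_KEY=examplesecret \
+//	AWS_REGION=us-east-1 S3_BUCKET=my-test-bucket S3_ENCRYPT=false \
+//	go test ./docs/storage/driver/s3-aws/
+//
+// Otherwise skipS3 below causes the tests to be skipped.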
+func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + secure := os.Getenv("S3_SECURE") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + region, + encryptBool, + secureBool, + minChunkSize, + rootDirectory, + storageClass, + driverName + "-test", + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StorageClassStandard) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy) + if 
err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(standardDriverUnwrapped.Bucket), + Key: aws.String(standardDriverUnwrapped.s3Path(standardFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if resp.StorageClass != nil { + t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(rrDriverUnwrapped.Bucket), + Key: aws.String(rrDriverUnwrapped.s3Path(standardFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if resp.StorageClass == nil { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", s3.StorageClassStandard) + } else if *resp.StorageClass != s3.StorageClassReducedRedundancy { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", *resp.StorageClass) + } + +} diff --git a/docs/storage/driver/s3/s3.go b/docs/storage/driver/s3-goamz/s3.go similarity index 98% rename from docs/storage/driver/s3/s3.go rename to docs/storage/driver/s3-goamz/s3.go index a1f4c57d..9208965b 100644 --- a/docs/storage/driver/s3/s3.go +++ b/docs/storage/driver/s3-goamz/s3.go @@ -2,16 +2,14 @@ // store blobs in Amazon S3 cloud storage. // // This package leverages the docker/goamz client library for interfacing with -// s3. +// S3. It is intended to be deprecated in favor of the s3-aws driver +// implementation. // -// Because s3 is a key, value store the Stat call does not support last modification +// Because S3 is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // -// Keep in mind that s3 guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. 
package s3 import ( @@ -37,7 +35,7 @@ import ( "github.com/docker/distribution/registry/storage/driver/factory" ) -const driverName = "s3" +const driverName = "s3goamz" // minChunkSize defines the minimum multipart upload chunk size // S3 API requires multipart upload chunks to be at least 5MB diff --git a/docs/storage/driver/s3/s3_test.go b/docs/storage/driver/s3-goamz/s3_test.go similarity index 98% rename from docs/storage/driver/s3/s3_test.go rename to docs/storage/driver/s3-goamz/s3_test.go index 660d5350..352ec3f5 100644 --- a/docs/storage/driver/s3/s3_test.go +++ b/docs/storage/driver/s3-goamz/s3_test.go @@ -71,7 +71,7 @@ func init() { minChunkSize, rootDirectory, storageClass, - "", + driverName + "-test", } return New(parameters) @@ -196,6 +196,6 @@ func TestStorageClass(t *testing.T) { } defer resp.Body.Close() if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { - t.Fatalf("unexpected storage class for standard file: %v", storageClass) + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) } } From ad6a0735d22d6a0172c12acffc508b32049d66f7 Mon Sep 17 00:00:00 2001 From: liuchang0812 Date: Tue, 16 Feb 2016 11:17:09 +0800 Subject: [PATCH 405/501] closes #1461, enhance log message of oss driver Signed-off-by: liuchang0812 --- docs/storage/driver/oss/oss.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 67215bc2..03afdbe1 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -744,9 +744,9 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int } } logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) - testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) - logrus.Infof("testURL: %s", testURL) - return testURL, nil + signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("signed URL: %s", signedURL) + return signedURL, nil } func (d *driver) ossPath(path string) string { From 7ca24a7f5a27949ff407fe9c8dea636606a118b4 Mon Sep 17 00:00:00 2001 From: liuchang0812 Date: Tue, 16 Feb 2016 11:42:09 +0800 Subject: [PATCH 406/501] fix gofmt Signed-off-by: liuchang0812 --- docs/storage/driver/oss/oss.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go index 03afdbe1..1ec04525 100644 --- a/docs/storage/driver/oss/oss.go +++ b/docs/storage/driver/oss/oss.go @@ -745,7 +745,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int } logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) - logrus.Infof("signed URL: %s", signedURL) + logrus.Infof("signed URL: %s", signedURL) return signedURL, nil } From c21f4eb561496ebf7794b0375f6f3b6cfc6343bd Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 12 Feb 2016 17:15:19 -0800 Subject: [PATCH 407/501] Add credential authenticator interface Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/auth/auth.go | 14 ++++++++++++++ docs/auth/htpasswd/access.go | 19 +++++++------------ docs/auth/htpasswd/htpasswd.go | 6 ++++-- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/docs/auth/auth.go b/docs/auth/auth.go index 0ba2eba3..0164246c 100644 --- a/docs/auth/auth.go +++ 
b/docs/auth/auth.go
@@ -33,6 +33,7 @@ package auth
 
 import (
+	"errors"
 	"fmt"
 	"net/http"
 
@@ -49,6 +50,14 @@ const (
 	UserNameKey = "auth.user.name"
 )
 
+var (
+	// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
+	ErrInvalidCredential = errors.New("invalid authorization credential")
+
+	// ErrAuthenticationFailure is returned when an authentication failure is to be presented to the agent.
+	ErrAuthenticationFailure = errors.New("authentication failure")
+)
+
 // UserInfo carries information about
 // an authenticated/authorized client.
 type UserInfo struct {
@@ -97,6 +106,11 @@ type AccessController interface {
 	Authorized(ctx context.Context, access ...Access) (context.Context, error)
 }
 
+// CredentialAuthenticator is an object which is able to validate credentials
+type CredentialAuthenticator interface {
+	AuthenticateUser(username, password string) error
+}
+
 // WithUser returns a context with the authorized user info.
 func WithUser(ctx context.Context, user UserInfo) context.Context {
 	return userInfoContext{

diff --git a/docs/auth/htpasswd/access.go b/docs/auth/htpasswd/access.go
index 6e7ba180..4f71dc27 100644
--- a/docs/auth/htpasswd/access.go
+++ b/docs/auth/htpasswd/access.go
@@ -6,7 +6,6 @@
 package htpasswd
 
 import (
-	"errors"
 	"fmt"
 	"net/http"
 	"os"
@@ -15,14 +14,6 @@ import (
 	"github.com/docker/distribution/registry/auth"
 )
 
-var (
-	// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
-	ErrInvalidCredential = errors.New("invalid authorization credential")
-
-	// ErrAuthenticationFailure returned when authentication failure to be presented to agent.
-	ErrAuthenticationFailure = errors.New("authentication failure")
-)
-
 type accessController struct {
 	realm    string
 	htpasswd *htpasswd
@@ -65,21 +56,25 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut
 		if !ok {
 			return nil, &challenge{
 				realm: ac.realm,
-				err:   ErrInvalidCredential,
+				err:   auth.ErrInvalidCredential,
 			}
 		}
 
-		if err := ac.htpasswd.authenticateUser(username, password); err != nil {
+		if err := ac.AuthenticateUser(username, password); err != nil {
 			context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err)
 			return nil, &challenge{
 				realm: ac.realm,
-				err:   ErrAuthenticationFailure,
+				err:   auth.ErrAuthenticationFailure,
 			}
 		}
 
 	return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil
 }
 
+func (ac *accessController) AuthenticateUser(username, password string) error {
+	return ac.htpasswd.authenticateUser(username, password)
+}
+
 // challenge implements the auth.Challenge interface.
type challenge struct { realm string diff --git a/docs/auth/htpasswd/htpasswd.go b/docs/auth/htpasswd/htpasswd.go index 494ad0a7..83f797f7 100644 --- a/docs/auth/htpasswd/htpasswd.go +++ b/docs/auth/htpasswd/htpasswd.go @@ -6,6 +6,8 @@ import ( "io" "strings" + "github.com/docker/distribution/registry/auth" + "golang.org/x/crypto/bcrypt" ) @@ -33,12 +35,12 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err // timing attack paranoia bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) - return ErrAuthenticationFailure + return auth.ErrAuthenticationFailure } err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) if err != nil { - return ErrAuthenticationFailure + return auth.ErrAuthenticationFailure } return nil From 7d16fee7a4f743312979e3625a08f82ec8053626 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 10 Feb 2016 18:07:28 -0800 Subject: [PATCH 408/501] To avoid any network use unless necessary, delay establishing authorization challenges with the upstream until any proxied data is found not to be local. Implement auth challenges behind an interface and add to unit tests. Also, remove a non-sensical unit test. Signed-off-by: Richard Scothern --- docs/proxy/proxyauth.go | 9 +-- docs/proxy/proxyblobstore.go | 9 +++ docs/proxy/proxyblobstore_test.go | 6 ++ docs/proxy/proxymanifeststore.go | 9 ++- docs/proxy/proxymanifeststore_test.go | 37 +++++++++++-- docs/proxy/proxyregistry.go | 80 +++++++++++++++++++++------ docs/proxy/proxytagservice.go | 27 +++++---- docs/proxy/proxytagservice_test.go | 23 +++++++- 8 files changed, 156 insertions(+), 44 deletions(-) diff --git a/docs/proxy/proxyauth.go b/docs/proxy/proxyauth.go index e4bec75a..bcfa7aab 100644 --- a/docs/proxy/proxyauth.go +++ b/docs/proxy/proxyauth.go @@ -8,6 +8,7 @@ import ( ) const tokenURL = "https://auth.docker.io/token" +const challengeHeader = "Docker-Distribution-Api-Version" type userpass struct { username string @@ -24,12 +25,8 @@ func (c credentials) Basic(u *url.URL) (string, string) { return up.username, up.password } -// ConfigureAuth authorizes with the upstream registry -func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { - if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { - return nil, err - } - +// ConfigureAuth stores credentials for challenge responses +func configureAuth(username, password string) (auth.CredentialStore, error) { creds := map[string]userpass{ tokenURL: { username: username, diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index 1d7dfbc6..5f1a9c50 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -22,6 +22,7 @@ type proxyBlobStore struct { remoteStore distribution.BlobService scheduler *scheduler.TTLExpirationScheduler repositoryName reference.Named + authChallenger authChallenger } var _ distribution.BlobStore = &proxyBlobStore{} @@ -121,6 +122,10 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, return nil } + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return err + } + mu.Lock() _, ok := inflight[dgst] if ok { @@ -162,6 +167,10 @@ func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distri return distribution.Descriptor{}, err } + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return distribution.Descriptor{}, err + } + return pbs.remoteStore.Stat(ctx, dgst) } diff --git 
a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index 3054ef0b..4d63aa42 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -168,6 +168,7 @@ func makeTestEnv(t *testing.T, name string) *testEnv { remoteStore: truthBlobs, localStore: localBlobs, scheduler: s, + authChallenger: &mockChallenger{}, } te := &testEnv{ @@ -242,6 +243,11 @@ func TestProxyStoreStat(t *testing.T) { if (*remoteStats)["stat"] != remoteBlobCount { t.Errorf("Unexpected remote stat count") } + + if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { + t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) + } + } func TestProxyStoreServeHighConcurrency(t *testing.T) { diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index 0b5532d4..b8109667 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -19,6 +19,7 @@ type proxyManifestStore struct { remoteManifests distribution.ManifestService repositoryName reference.Named scheduler *scheduler.TTLExpirationScheduler + authChallenger authChallenger } var _ distribution.ManifestService = &proxyManifestStore{} @@ -31,7 +32,9 @@ func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (b if exists { return true, nil } - + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return false, err + } return pms.remoteManifests.Exists(ctx, dgst) } @@ -41,6 +44,10 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio var fromRemote bool manifest, err := pms.localManifests.Get(ctx, dgst, options...) if err != nil { + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return nil, err + } + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
if err != nil { return nil, err diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 00f9daf9..e16fa6f5 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -2,6 +2,7 @@ package proxy import ( "io" + "sync" "testing" "github.com/docker/distribution" @@ -64,6 +65,20 @@ func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, } */ +type mockChallenger struct { + sync.Mutex + count int +} + +// Called for remote operations only +func (mc *mockChallenger) tryEstablishChallenges(context.Context) error { + mc.Lock() + defer mc.Unlock() + + mc.count++ + return nil +} + func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { nameRef, err := reference.ParseNamed(name) if err != nil { @@ -120,6 +135,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE remoteManifests: truthManifests, scheduler: s, repositoryName: nameRef, + authChallenger: &mockChallenger{}, }, } } @@ -198,6 +214,10 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) } + if env.manifests.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger) + } + // Get - should succeed and pull manifest into local _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { @@ -212,6 +232,10 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Expected local put") } + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + // Stat - should only go to local exists, err = env.manifests.Exists(ctx, env.manifestDigest) if err != nil { @@ -225,17 +249,18 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Unexpected exists count") } - // Get - should get from remote, to test freshness + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + + // Get proxied - won't require another authchallenge _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } - if (*remoteStats)["get"] != 2 && (*remoteStats)["exists"] != 1 && (*localStats)["put"] != 1 { - t.Errorf("Unexpected get count") + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) } -} - -func TestProxyTagService(t *testing.T) { } diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index 6ea79ff6..ae7086b5 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -1,10 +1,11 @@ package proxy import ( + "fmt" "net/http" "net/url" + "sync" - "fmt" "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" @@ -19,13 +20,10 @@ import ( // proxyingRegistry fetches content from a remote registry and caches it locally type proxyingRegistry struct { - embedded distribution.Namespace // provides local registry functionality - - scheduler *scheduler.TTLExpirationScheduler - - remoteURL string - credentialStore auth.CredentialStore - challengeManager auth.ChallengeManager + embedded distribution.Namespace // provides local registry functionality + scheduler *scheduler.TTLExpirationScheduler + remoteURL string + authChallenger authChallenger } // NewRegistryPullThroughCache creates a registry acting as a 
pull through cache @@ -93,18 +91,20 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name return nil, err } - challengeManager := auth.NewSimpleChallengeManager() - cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) + cs, err := configureAuth(config.Username, config.Password) if err != nil { return nil, err } return &proxyingRegistry{ - embedded: registry, - scheduler: s, - challengeManager: challengeManager, - credentialStore: cs, - remoteURL: config.RemoteURL, + embedded: registry, + scheduler: s, + remoteURL: config.RemoteURL, + authChallenger: &remoteAuthChallenger{ + remoteURL: config.RemoteURL, + challengeManager: auth.NewSimpleChallengeManager(), + credentialStore: cs, + }, }, nil } @@ -117,8 +117,13 @@ func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, la } func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { + hcm, ok := pr.authChallenger.(*remoteAuthChallenger) + if !ok { + return nil, fmt.Errorf("unexpected challenge manager type %T", pr.authChallenger) + } + tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name.Name(), "pull"))) + auth.NewAuthorizer(hcm.challengeManager, auth.NewTokenHandler(http.DefaultTransport, hcm.credentialStore, name.Name(), "pull"))) localRepo, err := pr.embedded.Repository(ctx, name) if err != nil { @@ -145,6 +150,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named remoteStore: remoteRepo.Blobs(ctx), scheduler: pr.scheduler, repositoryName: name, + authChallenger: pr.authChallenger, }, manifests: &proxyManifestStore{ repositoryName: name, @@ -152,15 +158,53 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named remoteManifests: remoteManifests, ctx: ctx, scheduler: pr.scheduler, + authChallenger: pr.authChallenger, }, name: name, tags: &proxyTagService{ - localTags: localRepo.Tags(ctx), - remoteTags: remoteRepo.Tags(ctx), + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + authChallenger: pr.authChallenger, }, }, nil } +// authChallenger encapsulates a request to the upstream to establish credential challenges +type authChallenger interface { + tryEstablishChallenges(context.Context) error +} + +type remoteAuthChallenger struct { + remoteURL string + sync.Mutex + challengeManager auth.ChallengeManager + credentialStore auth.CredentialStore +} + +// tryEstablishChallenges will attempt to get a challenge types for the upstream if none currently exist +func (hcm *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { + hcm.Lock() + defer hcm.Unlock() + + remoteURL := hcm.remoteURL + "/v2/" + challenges, err := hcm.challengeManager.GetChallenges(remoteURL) + if err != nil { + return err + } + + if len(challenges) > 0 { + return nil + } + + // establish challenge type with upstream + if err := ping(hcm.challengeManager, remoteURL, challengeHeader); err != nil { + return err + } + + context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, hcm.challengeManager) + return nil +} + // proxiedRepository uses proxying blob and manifest services to serve content // locally, or pulling it through from a remote and caching it locally if it doesn't // already exist diff --git a/docs/proxy/proxytagservice.go b/docs/proxy/proxytagservice.go index c52460c4..a8273030 100644 
--- a/docs/proxy/proxytagservice.go +++ b/docs/proxy/proxytagservice.go @@ -7,8 +7,9 @@ import ( // proxyTagService supports local and remote lookup of tags. type proxyTagService struct { - localTags distribution.TagService - remoteTags distribution.TagService + localTags distribution.TagService + remoteTags distribution.TagService + authChallenger authChallenger } var _ distribution.TagService = proxyTagService{} @@ -17,16 +18,19 @@ var _ distribution.TagService = proxyTagService{} // tag service first and then caching it locally. If the remote is unavailable // the local association is returned func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - desc, err := pt.remoteTags.Get(ctx, tag) + err := pt.authChallenger.tryEstablishChallenges(ctx) if err == nil { - err := pt.localTags.Tag(ctx, tag, desc) - if err != nil { - return distribution.Descriptor{}, err + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil } - return desc, nil } - desc, err = pt.localTags.Get(ctx, tag) + desc, err := pt.localTags.Get(ctx, tag) if err != nil { return distribution.Descriptor{}, err } @@ -46,9 +50,12 @@ func (pt proxyTagService) Untag(ctx context.Context, tag string) error { } func (pt proxyTagService) All(ctx context.Context) ([]string, error) { - tags, err := pt.remoteTags.All(ctx) + err := pt.authChallenger.tryEstablishChallenges(ctx) if err == nil { - return tags, err + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } } return pt.localTags.All(ctx) } diff --git a/docs/proxy/proxytagservice_test.go b/docs/proxy/proxytagservice_test.go index 8d9518c0..a446645c 100644 --- a/docs/proxy/proxytagservice_test.go +++ b/docs/proxy/proxytagservice_test.go @@ -69,8 +69,9 @@ func testProxyTagService(local, remote map[string]distribution.Descriptor) *prox remote = make(map[string]distribution.Descriptor) } return &proxyTagService{ - localTags: &mockTagStore{mapping: local}, - remoteTags: &mockTagStore{mapping: remote}, + localTags: &mockTagStore{mapping: local}, + remoteTags: &mockTagStore{mapping: remote}, + authChallenger: &mockChallenger{}, } } @@ -87,6 +88,10 @@ func TestGet(t *testing.T) { t.Fatal(err) } + if proxyTags.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger) + } + if d != remoteDesc { t.Fatal("unable to get put tag") } @@ -112,6 +117,10 @@ func TestGet(t *testing.T) { t.Fatal(err) } + if proxyTags.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger) + } + if d != newRemoteDesc { t.Fatal("unable to get put tag") } @@ -142,7 +151,11 @@ func TestGet(t *testing.T) { t.Fatal("untagged tag should be pulled through") } - // Add another tag. Ensure both tags appear in enumerate + if proxyTags.authChallenger.(*mockChallenger).count != 3 { + t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + // Add another tag. 
Ensure both tags appear in 'All'
 	err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42})
 	if err != nil {
 		t.Fatal(err)
@@ -161,4 +174,8 @@ func TestGet(t *testing.T) {
 	if all[0] != "funtag" && all[1] != "remote" {
 		t.Fatalf("Unexpected tags returned from All() : %v ", all)
 	}
+
+	if proxyTags.authChallenger.(*mockChallenger).count != 4 {
+		t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger)
+	}
 }

From c6871737bc151f705aa30253b2fbb7f9209ad353 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Tue, 16 Feb 2016 17:48:07 -0800
Subject: [PATCH 409/501] [driver/s3aws] Fix TestStorageClass

Fixes a bug in TestStorageClass for the s3aws driver where the "standard"
file was checked for reduced-redundancy storage.

Signed-off-by: Brian Bland
---
 docs/storage/driver/s3-aws/s3_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/storage/driver/s3-aws/s3_test.go b/docs/storage/driver/s3-aws/s3_test.go
index 6816e650..1137b3e2 100644
--- a/docs/storage/driver/s3-aws/s3_test.go
+++ b/docs/storage/driver/s3-aws/s3_test.go
@@ -186,7 +186,7 @@ func TestStorageClass(t *testing.T) {
 	rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver)
 	resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{
 		Bucket: aws.String(rrDriverUnwrapped.Bucket),
-		Key:    aws.String(rrDriverUnwrapped.s3Path(standardFilename)),
+		Key:    aws.String(rrDriverUnwrapped.s3Path(rrFilename)),
 	})
 	if err != nil {
 		t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err)

From 20bc910cdf76831901e4dafaaa1e44cff162280b Mon Sep 17 00:00:00 2001
From: HuKeping
Date: Wed, 17 Feb 2016 14:11:20 +0800
Subject: [PATCH 410/501] Cleanup: remove unused log

Signed-off-by: Hu Keping
---
 docs/client/auth/session.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go
index f4c7ade4..a9b228c5 100644
--- a/docs/client/auth/session.go
+++ b/docs/client/auth/session.go
@@ -215,7 +215,6 @@ type tokenResponse struct {
 }
 
 func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) {
-	//log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username)
 	realm, ok := params["realm"]
 	if !ok {
 		return nil, errors.New("no realm specified for token auth challenge")

From 2e8244822c9efd25cbe735fad72175bc990354d3 Mon Sep 17 00:00:00 2001
From: Anton Tiurin
Date: Wed, 17 Feb 2016 13:53:43 +0300
Subject: [PATCH 411/501] Fix description of StorageDriver.WriteStream

Offset can be larger than CurrentSize; this case is exercised by
DriverSuite.testContinueStreamAppend.

Signed-off-by: Anton Tiurin
---
 docs/storage/driver/storagedriver.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go
index d5e6fe9f..603020f1 100644
--- a/docs/storage/driver/storagedriver.go
+++ b/docs/storage/driver/storagedriver.go
@@ -57,7 +57,6 @@ type StorageDriver interface {
 	// WriteStream stores the contents of the provided io.ReadCloser at a
 	// location designated by the given path.
 	// May be used to resume writing a stream by providing a nonzero offset.
-	// The offset must be no larger than the CurrentSize for this path.
WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) // Stat retrieves the FileInfo for the given path, including the current From 18fd1c07025a9aaff9c65044a58cb1445f96cbd7 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 17 Feb 2016 10:42:34 -0800 Subject: [PATCH 412/501] Extend authChallenger interface to remove type cast. Signed-off-by: Richard Scothern --- docs/proxy/proxyauth.go | 2 +- docs/proxy/proxymanifeststore_test.go | 17 ++++++++--- docs/proxy/proxyregistry.go | 43 ++++++++++++++++----------- 3 files changed, 39 insertions(+), 23 deletions(-) diff --git a/docs/proxy/proxyauth.go b/docs/proxy/proxyauth.go index bcfa7aab..6f0eb005 100644 --- a/docs/proxy/proxyauth.go +++ b/docs/proxy/proxyauth.go @@ -25,7 +25,7 @@ func (c credentials) Basic(u *url.URL) (string, string) { return up.username, up.password } -// ConfigureAuth stores credentials for challenge responses +// configureAuth stores credentials for challenge responses func configureAuth(username, password string) (auth.CredentialStore, error) { creds := map[string]userpass{ tokenURL: { diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index e16fa6f5..312eb343 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" @@ -71,11 +72,19 @@ type mockChallenger struct { } // Called for remote operations only -func (mc *mockChallenger) tryEstablishChallenges(context.Context) error { - mc.Lock() - defer mc.Unlock() +func (m *mockChallenger) tryEstablishChallenges(context.Context) error { + m.Lock() + defer m.Unlock() - mc.count++ + m.count++ + return nil +} + +func (m *mockChallenger) credentialStore() auth.CredentialStore { + return nil +} + +func (m *mockChallenger) challengeManager() auth.ChallengeManager { return nil } diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index ae7086b5..e25fe783 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -101,9 +101,9 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name scheduler: s, remoteURL: config.RemoteURL, authChallenger: &remoteAuthChallenger{ - remoteURL: config.RemoteURL, - challengeManager: auth.NewSimpleChallengeManager(), - credentialStore: cs, + remoteURL: config.RemoteURL, + cm: auth.NewSimpleChallengeManager(), + cs: cs, }, }, nil } @@ -117,13 +117,10 @@ func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, la } func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { - hcm, ok := pr.authChallenger.(*remoteAuthChallenger) - if !ok { - return nil, fmt.Errorf("unexpected challenge manager type %T", pr.authChallenger) - } + c := pr.authChallenger tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(hcm.challengeManager, auth.NewTokenHandler(http.DefaultTransport, hcm.credentialStore, name.Name(), "pull"))) + auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull"))) localRepo, err := pr.embedded.Repository(ctx, name) 
if err != nil { @@ -172,22 +169,32 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named // authChallenger encapsulates a request to the upstream to establish credential challenges type authChallenger interface { tryEstablishChallenges(context.Context) error + challengeManager() auth.ChallengeManager + credentialStore() auth.CredentialStore } type remoteAuthChallenger struct { remoteURL string sync.Mutex - challengeManager auth.ChallengeManager - credentialStore auth.CredentialStore + cm auth.ChallengeManager + cs auth.CredentialStore } -// tryEstablishChallenges will attempt to get a challenge types for the upstream if none currently exist -func (hcm *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { - hcm.Lock() - defer hcm.Unlock() +func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { + return r.cs +} - remoteURL := hcm.remoteURL + "/v2/" - challenges, err := hcm.challengeManager.GetChallenges(remoteURL) +func (r *remoteAuthChallenger) challengeManager() auth.ChallengeManager { + return r.cm +} + +// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist +func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { + r.Lock() + defer r.Unlock() + + remoteURL := r.remoteURL + "/v2/" + challenges, err := r.cm.GetChallenges(remoteURL) if err != nil { return err } @@ -197,11 +204,11 @@ func (hcm *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) err } // establish challenge type with upstream - if err := ping(hcm.challengeManager, remoteURL, challengeHeader); err != nil { + if err := ping(r.cm, remoteURL, challengeHeader); err != nil { return err } - context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, hcm.challengeManager) + context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) return nil } From c58aa8a50a6def6855fa01b13e210450454c6c25 Mon Sep 17 00:00:00 2001 From: xiekeyang Date: Mon, 22 Feb 2016 15:24:49 +0800 Subject: [PATCH 413/501] compare error output in tagstore unit test Signed-off-by: xiekeyang --- docs/storage/tagstore_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/storage/tagstore_test.go b/docs/storage/tagstore_test.go index 52873a69..554a46bf 100644 --- a/docs/storage/tagstore_test.go +++ b/docs/storage/tagstore_test.go @@ -98,8 +98,9 @@ func TestTagStoreUnTag(t *testing.T) { t.Error(err) } + errExpect := distribution.ErrTagUnknown{Tag: "latest"}.Error() _, err = tags.Get(ctx, "latest") - if err == nil { + if err == nil || err.Error() != errExpect { t.Error("Expected error getting untagged tag") } } From 776e01f8bc794bb4e6d0256930b1f1ce18691560 Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Thu, 18 Feb 2016 19:37:03 +0100 Subject: [PATCH 414/501] Defined ErrAccessDenied error Middleware code may perform additional checks on blobs written. Allow it to return access denied errors that will result in 403 Forbidden. 
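As an illustration only (not part of this patch), a middleware blob
writer could veto a commit like this; the wrapper type and the policy
callback are hypothetical:

    package middleware

    import (
    	"github.com/docker/distribution"
    	"github.com/docker/distribution/context"
    )

    // checkedBlobWriter wraps a BlobWriter and rejects commits refused by a
    // caller-supplied policy. The registry's HTTP handlers translate
    // ErrAccessDenied into errcode.ErrorCodeDenied (403 Forbidden).
    type checkedBlobWriter struct {
    	distribution.BlobWriter
    	allowed func(distribution.Descriptor) bool // hypothetical policy hook
    }

    func (bw *checkedBlobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
    	if !bw.allowed(desc) {
    		// surfaces to the client as 403 Forbidden
    		return distribution.Descriptor{}, distribution.ErrAccessDenied
    	}
    	return bw.BlobWriter.Commit(ctx, desc)
    }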
Signed-off-by: Michal Minar --- docs/handlers/blobupload.go | 2 ++ docs/handlers/images.go | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index e2c34d83..bfeddb03 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -253,6 +253,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) default: switch err { + case distribution.ErrAccessDenied: + buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied) case distribution.ErrUnsupported: buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: diff --git a/docs/handlers/images.go b/docs/handlers/images.go index f5c9eada..8ef7197a 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -253,6 +253,10 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) return } + if err == distribution.ErrAccessDenied { + imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied) + return + } switch err := err.(type) { case distribution.ErrManifestVerification: for _, verificationError := range err { From 29e0411f001abd373ff4bacfaae3119f05557944 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 17 Feb 2016 16:32:23 -0800 Subject: [PATCH 415/501] Enable proxying registries to downgrade fetched manifests to Schema 1. Ensure Accept headers are sent with TagService.Get (which hits manifest endpoints). Add support for remote Get and Put for the proxied blobstore. Signed-off-by: Richard Scothern --- docs/client/repository.go | 24 +++++++++++++++++++--- docs/proxy/proxyblobstore.go | 26 +++++++++++++++++++---- docs/proxy/proxyblobstore_test.go | 34 +++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 7 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index ebf44d47..830749f1 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -292,9 +292,18 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er if err != nil { return distribution.Descriptor{}, err } - var attempts int - resp, err := t.client.Head(u) + req, err := http.NewRequest("HEAD", u, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + var attempts int + resp, err := t.client.Do(req) check: if err != nil { return distribution.Descriptor{}, err @@ -304,7 +313,16 @@ check: case resp.StatusCode >= 200 && resp.StatusCode < 400: return descriptorFromResponse(resp) case resp.StatusCode == http.StatusMethodNotAllowed: - resp, err = t.client.Get(u) + req, err = http.NewRequest("GET", u, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + resp, err = t.client.Do(req) attempts++ if attempts > 1 { return distribution.Descriptor{}, err diff --git a/docs/proxy/proxyblobstore.go b/docs/proxy/proxyblobstore.go index 5f1a9c50..7a6d7ea2 100644 --- a/docs/proxy/proxyblobstore.go +++ b/docs/proxy/proxyblobstore.go @@ -174,6 +174,28 @@ func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distri return pbs.remoteStore.Stat(ctx, dgst) } +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, 
error) { + blob, err := pbs.localStore.Get(ctx, dgst) + if err == nil { + return blob, nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return []byte{}, err + } + + blob, err = pbs.remoteStore.Get(ctx, dgst) + if err != nil { + return []byte{}, err + } + + _, err = pbs.localStore.Put(ctx, "", blob) + if err != nil { + return []byte{}, err + } + return blob, nil +} + // Unsupported functions func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { return distribution.Descriptor{}, distribution.ErrUnsupported @@ -195,10 +217,6 @@ func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distri return nil, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - return nil, distribution.ErrUnsupported -} - func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index 4d63aa42..b93b5343 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -218,6 +218,40 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { te.inRemote = inRemote te.numUnique = numUnique } +func TestProxyStoreGet(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + populate(t, te, 1, 10, 1) + _, err := te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + + _, err = te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + +} func TestProxyStoreStat(t *testing.T) { te := makeTestEnv(t, "foo/bar") From ecc560f46f1f63556796fa0ff30bcd52030f514a Mon Sep 17 00:00:00 2001 From: Michal Minar Date: Thu, 18 Feb 2016 19:20:53 +0100 Subject: [PATCH 416/501] Commit blob with known size Signed-off-by: Michal Minar --- docs/handlers/blobupload.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index e2c34d83..31a3367f 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -239,12 +239,18 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } + size := buh.State.Offset + if offset, err := buh.Upload.Seek(0, os.SEEK_CUR); err == nil { + size = offset + } + desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ Digest: dgst, + Size: size, // TODO(stevvooe): This isn't wildly important yet, but we should - // really set the length and mediatype. For now, we can let the - // backend take care of this. + // really set the mediatype. For now, we can let the backend take care + // of this. }) if err != nil { From d16f3046c686b769011ae1ef9d1d22af724ba321 Mon Sep 17 00:00:00 2001 From: Stefan Weil Date: Tue, 23 Feb 2016 22:33:38 +0100 Subject: [PATCH 417/501] Fix some typos in comments and strings All of them were found and fixed by codespell. 
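(For reference, codespell can apply such fixes in place via its
-w/--write-changes flag, e.g. `codespell -w` over the affected
directories; the exact invocation used here is not recorded.)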
Signed-off-by: Stefan Weil --- docs/auth/token/token_test.go | 2 +- docs/client/transport/http_reader.go | 2 +- docs/proxy/proxymanifeststore_test.go | 2 +- docs/proxy/scheduler/scheduler_test.go | 4 ++-- docs/storage/driver/testsuites/testsuites.go | 2 +- docs/storage/walk_test.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/auth/token/token_test.go b/docs/auth/token/token_test.go index cd985770..827dbbd7 100644 --- a/docs/auth/token/token_test.go +++ b/docs/auth/token/token_test.go @@ -94,7 +94,7 @@ func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.Publi func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { signingKey, err := makeSigningKeyWithChain(rootKey, depth) if err != nil { - return nil, fmt.Errorf("unable to amke signing key with chain: %s", err) + return nil, fmt.Errorf("unable to make signing key with chain: %s", err) } var rawJWK json.RawMessage diff --git a/docs/client/transport/http_reader.go b/docs/client/transport/http_reader.go index 22b0b9d6..e1b17a03 100644 --- a/docs/client/transport/http_reader.go +++ b/docs/client/transport/http_reader.go @@ -66,7 +66,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { return 0, hrs.err } - // If we seeked to a different position, we need to reset the + // If we sought to a different position, we need to reset the // connection. This logic is here instead of Seek so that if // a seek is undone before the next read, the connection doesn't // need to be closed and reopened. A common example of this is diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 312eb343..1069d66c 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -213,7 +213,7 @@ func TestProxyManifests(t *testing.T) { // Stat - must check local and remote exists, err := env.manifests.Exists(ctx, env.manifestDigest) if err != nil { - t.Fatalf("Error checking existance") + t.Fatalf("Error checking existence") } if !exists { t.Errorf("Unexpected non-existant manifest") diff --git a/docs/proxy/scheduler/scheduler_test.go b/docs/proxy/scheduler/scheduler_test.go index d4edd1b1..556f5204 100644 --- a/docs/proxy/scheduler/scheduler_test.go +++ b/docs/proxy/scheduler/scheduler_test.go @@ -45,7 +45,7 @@ func TestSchedule(t *testing.T) { } _, ok := remainingRepos[repoName.String()] if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", repoName) + t.Fatalf("Trying to remove nonexistent repo: %s", repoName) } t.Log("removing", repoName) delete(remainingRepos, repoName.String()) @@ -86,7 +86,7 @@ func TestRestoreOld(t *testing.T) { } _, ok := remainingRepos[r.String()] if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", r) + t.Fatalf("Trying to remove nonexistent repo: %s", r) } delete(remainingRepos, r.String()) return nil diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 43d0811d..3ff4e1e6 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -924,7 +924,7 @@ func (suite *DriverSuite) TestEventualConsistency(c *check.C) { } if misswrites > 0 { - c.Log("There were " + string(misswrites) + " occurences of a write not being instantly available.") + c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") } c.Assert(misswrites, check.Not(check.Equals), 1024) diff --git 
a/docs/storage/walk_test.go b/docs/storage/walk_test.go index 42f67dba..3d7a4b1b 100644 --- a/docs/storage/walk_test.go +++ b/docs/storage/walk_test.go @@ -73,7 +73,7 @@ func TestWalkErrors(t *testing.T) { } } - err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { + err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { From b7d3424103a59f33ccdcda7019889dc54934119a Mon Sep 17 00:00:00 2001 From: Andrew T Nguyen Date: Tue, 19 Jan 2016 14:26:15 -0800 Subject: [PATCH 418/501] Implements garbage collection subcommand - Includes a change in the command to run the registry. The registry server itself is now started up as a subcommand. - Includes changes to the high level interfaces to support enumeration of various registry objects. Signed-off-by: Andrew T Nguyen --- docs/garbagecollect.go | 150 ++++++++++++++ docs/garbagecollect_test.go | 343 +++++++++++++++++++++++++++++++ docs/proxy/proxymanifeststore.go | 5 - docs/proxy/proxyregistry.go | 8 + docs/registry.go | 20 +- docs/root.go | 28 +++ docs/storage/blobstore.go | 32 +++ docs/storage/catalog.go | 31 +++ docs/storage/linkedblobstore.go | 53 +++++ docs/storage/manifeststore.go | 51 ++++- docs/storage/paths.go | 59 +++++- docs/storage/paths_test.go | 28 +++ docs/storage/registry.go | 13 +- docs/storage/vacuum.go | 4 +- 14 files changed, 796 insertions(+), 29 deletions(-) create mode 100644 docs/garbagecollect.go create mode 100644 docs/garbagecollect_test.go create mode 100644 docs/root.go diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go new file mode 100644 index 00000000..5e165aea --- /dev/null +++ b/docs/garbagecollect.go @@ -0,0 +1,150 @@ +package registry + +import ( + "fmt" + "os" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + + "github.com/spf13/cobra" +) + +func markAndSweep(storageDriver driver.StorageDriver) error { + ctx := context.Background() + + // Construct a registry + registry, err := storage.NewRegistry(ctx, storageDriver) + if err != nil { + return fmt.Errorf("failed to construct registry: %v", err) + } + + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return fmt.Errorf("coercion error: unable to convert Namespace to RepositoryEnumerator") + } + + // mark + markSet := make(map[digest.Digest]struct{}) + err = repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + var err error + named, err := reference.ParseNamed(repoName) + if err != nil { + return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) + } + repository, err := registry.Repository(ctx, named) + if err != nil { + return fmt.Errorf("failed to construct repository: %v", err) + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return fmt.Errorf("failed to construct manifest service: %v", err) + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("coercion error: unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + // Mark the manifest's blob + 
markSet[dgst] = struct{}{}
+
+			manifest, err := manifestService.Get(ctx, dgst)
+			if err != nil {
+				return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err)
+			}
+
+			descriptors := manifest.References()
+			for _, descriptor := range descriptors {
+				markSet[descriptor.Digest] = struct{}{}
+			}
+
+			switch manifest.(type) {
+			case *schema1.SignedManifest:
+				signaturesGetter, ok := manifestService.(distribution.SignaturesGetter)
+				if !ok {
+					return fmt.Errorf("coercion error: unable to convert ManifestService into SignaturesGetter")
+				}
+				signatures, err := signaturesGetter.GetSignatures(ctx, dgst)
+				if err != nil {
+					return fmt.Errorf("failed to get signatures for signed manifest: %v", err)
+				}
+				for _, signatureDigest := range signatures {
+					markSet[signatureDigest] = struct{}{}
+				}
+				break
+			case *schema2.DeserializedManifest:
+				config := manifest.(*schema2.DeserializedManifest).Config
+				markSet[config.Digest] = struct{}{}
+				break
+			}
+
+			return nil
+		})
+
+		return err
+	})
+
+	if err != nil {
+		return fmt.Errorf("failed to mark: %v\n", err)
+	}
+
+	// sweep
+	blobService := registry.Blobs()
+	deleteSet := make(map[digest.Digest]struct{})
+	err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
+		// check if digest is in markSet. If not, delete it!
+		if _, ok := markSet[dgst]; !ok {
+			deleteSet[dgst] = struct{}{}
+		}
+		return nil
+	})
+
+	// Construct vacuum
+	vacuum := storage.NewVacuum(ctx, storageDriver)
+	for dgst := range deleteSet {
+		err = vacuum.RemoveBlob(string(dgst))
+		if err != nil {
+			return fmt.Errorf("failed to delete blob %s: %v\n", dgst, err)
+		}
+	}
+
+	return err
+}
+
+// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
+var GCCmd = &cobra.Command{
+	Use:   "garbage-collect ",
+	Short: "`garbage-collect` deletes layers not referenced by any manifests",
+	Long:  "`garbage-collect` deletes layers not referenced by any manifests",
+	Run: func(cmd *cobra.Command, args []string) {
+
+		config, err := resolveConfiguration(args)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
+			cmd.Usage()
+			os.Exit(1)
+		}
+
+		driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
+			os.Exit(1)
+		}
+
+		err = markAndSweep(driver)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
+			os.Exit(1)
+		}
+	},
+}
diff --git a/docs/garbagecollect_test.go b/docs/garbagecollect_test.go
new file mode 100644
index 00000000..951a9e81
--- /dev/null
+++ b/docs/garbagecollect_test.go
@@ -0,0 +1,343 @@
+package registry
+
+import (
+	"io"
+	"testing"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage"
+	"github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/inmemory"
+	"github.com/docker/distribution/testutil"
+)
+
+type image struct {
+	manifest       distribution.Manifest
+	manifestDigest digest.Digest
+	layers         map[digest.Digest]io.ReadSeeker
+}
+
+func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace {
+	ctx := context.Background()
+	registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete)
+	if err != nil {
+		t.Fatalf("Failed to construct namespace")
+	}
+	return registry
+}
+
+func makeRepository(t
*testing.T, registry distribution.Namespace, name string) distribution.Repository { + ctx := context.Background() + + // Initialize a dummy repository + named, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("Failed to parse name %s: %v", name, err) + } + + repo, err := registry.Repository(ctx, named) + if err != nil { + t.Fatalf("Failed to construct repository: %v", err) + } + return repo +} + +func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService { + ctx := context.Background() + + manifestService, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf("Failed to construct manifest store: %v", err) + } + return manifestService +} + +func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { + ctx := context.Background() + blobService := registry.Blobs() + allBlobsMap := make(map[digest.Digest]struct{}) + err := blobService.Enumerate(ctx, func(dgst digest.Digest) error { + allBlobsMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all blobs: %v", err) + } + return allBlobsMap +} + +func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest { + // upload layers + err := testutil.UploadBlobs(repository, im.layers) + if err != nil { + t.Fatalf("layer upload failed: %v", err) + } + + // upload manifest + ctx := context.Background() + manifestService := makeManifestService(t, repository) + manifestDigest, err := manifestService.Put(ctx, im.manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + return manifestDigest +} + +func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema1Manifest(digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema2Manifest(repository, digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func TestNoDeletionNoEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "palailogos") + manifestService, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := uploadRandomSchema2Image(t, repo) + + // construct manifestlist for fun. 
+ blobstatter := registry.BlobStatter() + manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ + image1.manifestDigest, image2.manifestDigest}) + if err != nil { + t.Fatalf("Failed to make manifest list: %v", err) + } + + _, err = manifestService.Put(ctx, manifestList) + if err != nil { + t.Fatalf("Failed to add manifest list: %v", err) + } + + // Run GC + err = markAndSweep(inmemoryDriver) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // the +1 at the end is for the manifestList + // the first +3 at the end for each manifest's blob + // the second +3 at the end for each manifest's signature/config layer + totalBlobCount := len(image1.layers) + len(image2.layers) + len(image3.layers) + 1 + 3 + 3 + if len(blobs) != totalBlobCount { + t.Fatalf("Garbage collection affected storage") + } +} + +func TestDeletionHasEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "komnenos") + manifests, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := uploadRandomSchema2Image(t, repo) + + manifests.Delete(ctx, image2.manifestDigest) + manifests.Delete(ctx, image3.manifestDigest) + + // Run GC + err = markAndSweep(inmemoryDriver) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that the image1 manifest and all the layers are still in blobs + if _, ok := blobs[image1.manifestDigest]; !ok { + t.Fatalf("First manifest is missing") + } + + for layer := range image1.layers { + if _, ok := blobs[layer]; !ok { + t.Fatalf("manifest 1 layer is missing: %v", layer) + } + } + + // check that image2 and image3 layers are not still around + for layer := range image2.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 2 layer is present: %v", layer) + } + } + + for layer := range image3.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 3 layer is present: %v", layer) + } + } +} + +func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) { + for d = range digests { + break + } + return +} + +func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return +} + +func TestDeletionWithSharedLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "tzimiskes") + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + sharedKey := getAnyKey(randomLayers1) + manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey)) + if err != nil { + t.Fatalf("failed to make manifest: 
%v", err) + } + + manifestService := makeManifestService(t, repo) + + // Upload manifests + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestDigest2, err := manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + // delete + err = manifestService.Delete(ctx, manifestDigest2) + if err != nil { + t.Fatalf("manifest deletion failed: %v", err) + } + + // check that all of the layers in layer 1 are still there + blobs := allBlobs(t, registry) + for dgst := range randomLayers1 { + if _, ok := blobs[dgst]; !ok { + t.Fatalf("random layer 1 blob missing: %v", dgst) + } + } +} + +func TestOrphanBlobDeleted(t *testing.T) { + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "michael_z_doukas") + + digests, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatalf("Failed to create random digest: %v", err) + } + + if err = testutil.UploadBlobs(repo, digests); err != nil { + t.Fatalf("Failed to upload blob: %v", err) + } + + // formality to create the necessary directories + uploadRandomSchema2Image(t, repo) + + // Run GC + err = markAndSweep(inmemoryDriver) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that orphan blob layers are not still around + for dgst := range digests { + if _, ok := blobs[dgst]; ok { + t.Fatalf("Orphan layer is present: %v", dgst) + } + } +} diff --git a/docs/proxy/proxymanifeststore.go b/docs/proxy/proxymanifeststore.go index b8109667..f08e285d 100644 --- a/docs/proxy/proxymanifeststore.go +++ b/docs/proxy/proxymanifeststore.go @@ -93,8 +93,3 @@ func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Man func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } - -/*func (pms proxyManifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - return 0, distribution.ErrUnsupported -} -*/ diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index e25fe783..1663ab69 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -166,6 +166,14 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named }, nil } +func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator { + return pr.embedded.Blobs() +} + +func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { + return pr.embedded.BlobStatter() +} + // authChallenger encapsulates a request to the upstream to establish credential challenges type authChallenger interface { tryEstablishChallenges(context.Context) error diff --git a/docs/registry.go b/docs/registry.go index 86cb6a17..a1ba3b1a 100644 --- a/docs/registry.go +++ b/docs/registry.go @@ -24,16 +24,12 @@ import ( "github.com/yvasiyarov/gorelic" ) -// Cmd is a cobra command for running the registry. -var Cmd = &cobra.Command{ - Use: "registry ", - Short: "registry stores and distributes Docker images", - Long: "registry stores and distributes Docker images.", +// ServeCmd is a cobra command for running the registry. 
+var ServeCmd = &cobra.Command{ + Use: "serve ", + Short: "`serve` stores and distributes Docker images", + Long: "`serve` stores and distributes Docker images.", Run: func(cmd *cobra.Command, args []string) { - if showVersion { - version.PrintVersion() - return - } // setup context ctx := context.WithVersion(context.Background(), version.Version) @@ -65,12 +61,6 @@ var Cmd = &cobra.Command{ }, } -var showVersion bool - -func init() { - Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") -} - // A Registry represents a complete instance of the registry. // TODO(aaronl): It might make sense for Registry to become an interface. type Registry struct { diff --git a/docs/root.go b/docs/root.go new file mode 100644 index 00000000..46338b46 --- /dev/null +++ b/docs/root.go @@ -0,0 +1,28 @@ +package registry + +import ( + "github.com/docker/distribution/version" + "github.com/spf13/cobra" +) + +var showVersion bool + +func init() { + RootCmd.AddCommand(ServeCmd) + RootCmd.AddCommand(GCCmd) + RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") +} + +// RootCmd is the main command for the 'registry' binary. +var RootCmd = &cobra.Command{ + Use: "registry", + Short: "`registry`", + Long: "`registry`", + Run: func(cmd *cobra.Command, args []string) { + if showVersion { + version.PrintVersion() + return + } + cmd.Usage() + }, +} diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index f8fe23fe..9034cb68 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -1,6 +1,8 @@ package storage import ( + "path" + "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" @@ -85,6 +87,36 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr }, bs.driver.PutContent(ctx, bp, p) } +func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { + + specPath, err := pathFor(blobsPathSpec{}) + if err != nil { + return err + } + + err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { + // skip directories + if fileInfo.IsDir() { + return nil + } + + currentPath := fileInfo.Path() + // we only want to parse paths that end with /data + _, fileName := path.Split(currentPath) + if fileName != "data" { + return nil + } + + digest, err := digestFromPath(currentPath) + if err != nil { + return err + } + + return ingester(digest) + }) + return err +} + // path returns the canonical path for the blob identified by digest. The blob // may or may not exist. 
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
diff --git a/docs/storage/catalog.go b/docs/storage/catalog.go
index 481489f2..3b13b7ad 100644
--- a/docs/storage/catalog.go
+++ b/docs/storage/catalog.go
@@ -64,3 +64,34 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri
 
 	return n, errVal
 }
+
+// Enumerate applies ingester to each repository
+func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
+	repoNameBuffer := make([]string, 100)
+	var last string
+	for {
+		n, err := reg.Repositories(ctx, repoNameBuffer, last)
+		if err != nil && err != io.EOF {
+			return err
+		}
+
+		if n == 0 {
+			break
+		}
+
+		last = repoNameBuffer[n-1]
+		for i := 0; i < n; i++ {
+			repoName := repoNameBuffer[i]
+			err = ingester(repoName)
+			if err != nil {
+				return err
+			}
+		}
+
+		if err == io.EOF {
+			break
+		}
+	}
+	return nil
+
+}
diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index 3e6f9c2d..76a1c29d 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -3,6 +3,7 @@ package storage
 import (
 	"fmt"
 	"net/http"
+	"path"
 	"time"
 
 	"github.com/docker/distribution"
@@ -37,6 +38,9 @@ type linkedBlobStore struct {
 	// removed and the blob links folder should be merged. The first entry is
 	// treated as the "canonical" link location and will be used for writes.
 	linkPathFns []linkPathFunc
+
+	// linkDirectoryPathSpec locates the root directories in which one might find links
+	linkDirectoryPathSpec pathSpec
 }
 
 var _ distribution.BlobStore = &linkedBlobStore{}
@@ -236,6 +240,55 @@ func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) erro
 	return nil
 }
 
+func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error {
+	rootPath, err := pathFor(lbs.linkDirectoryPathSpec)
+	if err != nil {
+		return err
+	}
+	err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error {
+		// exit early if directory...
+ if fileInfo.IsDir() { + return nil + } + filePath := fileInfo.Path() + + // check if it's a link + _, fileName := path.Split(filePath) + if fileName != "link" { + return nil + } + + // read the digest found in link + digest, err := lbs.blobStore.readlink(ctx, filePath) + if err != nil { + return err + } + + // ensure this conforms to the linkPathFns + _, err = lbs.Stat(ctx, digest) + if err != nil { + // we expect this error to occur so we move on + if err == distribution.ErrBlobUnknown { + return nil + } + return err + } + + err = ingestor(digest) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + return err + } + + return nil +} + func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { repo, err := lbs.registry.Repository(ctx, sourceRepo) if err != nil { diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index e259af48..f3660c98 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "path" "encoding/json" "github.com/docker/distribution" @@ -129,6 +130,52 @@ func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { return ms.blobStore.Delete(ctx, dgst) } -func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - return 0, distribution.ErrUnsupported +func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { + err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { + err := ingester(dgst) + if err != nil { + return err + } + return nil + }) + return err +} + +// Only valid for schema1 signed manifests +func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) { + // sanity check that digest refers to a schema1 digest + manifest, err := ms.Get(ctx, manifestDigest) + if err != nil { + return nil, err + } + + if _, ok := manifest.(*schema1.SignedManifest); !ok { + return nil, fmt.Errorf("digest %v is not for schema1 manifest", manifestDigest) + } + + signaturesPath, err := pathFor(manifestSignaturesPathSpec{ + name: ms.repository.Named().Name(), + revision: manifestDigest, + }) + if err != nil { + return nil, err + } + + signaturesPath = path.Join(signaturesPath, "sha256") + + signaturePaths, err := ms.blobStore.driver.List(ctx, signaturesPath) + if err != nil { + return nil, err + } + + var digests []digest.Digest + for _, sigPath := range signaturePaths { + sigdigest, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) + if err != nil { + // merely found not a digest + continue + } + digests = append(digests, sigdigest) + } + return digests, nil } diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 6ee54127..8985f043 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -74,6 +74,7 @@ const ( // // Manifests: // +// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ // manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// // manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link // manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/ @@ -100,6 +101,7 @@ const ( // // Blob Store: // +// blobsPathSpec: /v2/blobs/ // blobPathSpec: /v2/blobs/// // blobDataPathSpec: /v2/blobs////data // blobMediaTypePathSpec: /v2/blobs////data @@ -125,6 +127,9 @@ func pathFor(spec 
pathSpec) (string, error) { switch v := spec.(type) { + case manifestRevisionsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil + case manifestRevisionPathSpec: components, err := digestPathComponents(v.revision, false) if err != nil { @@ -246,6 +251,17 @@ func pathFor(spec pathSpec) (string, error) { blobLinkPathComponents := append(repoPrefix, v.name, "_layers") return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobsPathSpec: + blobsPathPrefix := append(rootPrefix, "blobs") + return path.Join(blobsPathPrefix...), nil + case blobPathSpec: + components, err := digestPathComponents(v.digest, true) + if err != nil { + return "", err + } + + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil case blobDataPathSpec: components, err := digestPathComponents(v.digest, true) if err != nil { @@ -281,6 +297,14 @@ type pathSpec interface { pathSpec() } +// manifestRevisionsPathSpec describes the directory path for +// a manifest revision. +type manifestRevisionsPathSpec struct { + name string +} + +func (manifestRevisionsPathSpec) pathSpec() {} + // manifestRevisionPathSpec describes the components of the directory path for // a manifest revision. type manifestRevisionPathSpec struct { @@ -404,12 +428,17 @@ var blobAlgorithmReplacer = strings.NewReplacer( ";", "/", ) -// // blobPathSpec contains the path for the registry global blob store. -// type blobPathSpec struct { -// digest digest.Digest -// } +// blobsPathSpec contains the path for the blobs directory +type blobsPathSpec struct{} -// func (blobPathSpec) pathSpec() {} +func (blobsPathSpec) pathSpec() {} + +// blobPathSpec contains the path for the registry global blob store. +type blobPathSpec struct { + digest digest.Digest +} + +func (blobPathSpec) pathSpec() {} // blobDataPathSpec contains the path for the registry global blob store. For // now, this contains layer data, exclusively. 
@@ -491,3 +520,23 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) return append(prefix, suffix...), nil } + +// Reconstructs a digest from a path +func digestFromPath(digestPath string) (digest.Digest, error) { + + digestPath = strings.TrimSuffix(digestPath, "/data") + dir, hex := path.Split(digestPath) + dir = path.Dir(dir) + dir, next := path.Split(dir) + + // next is either the algorithm OR the first two characters in the hex string + var algo string + if next == hex[:2] { + algo = path.Base(dir) + } else { + algo = next + } + + dgst := digest.NewDigestFromHex(algo, hex) + return dgst, dgst.Validate() +} diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go index 2ad78e9d..91004bd4 100644 --- a/docs/storage/paths_test.go +++ b/docs/storage/paths_test.go @@ -2,6 +2,8 @@ package storage import ( "testing" + + "github.com/docker/distribution/digest" ) func TestPathMapper(t *testing.T) { @@ -120,3 +122,29 @@ func TestPathMapper(t *testing.T) { } } + +func TestDigestFromPath(t *testing.T) { + for _, testcase := range []struct { + path string + expected digest.Digest + multilevel bool + err error + }{ + { + path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data", + multilevel: true, + expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86", + err: nil, + }, + } { + result, err := digestFromPath(testcase.path) + if err != testcase.err { + t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err) + } + + if result != testcase.expected { + t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) + + } + } +} diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 9c74ebbc..a1128b4a 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -147,6 +147,14 @@ func (reg *registry) Repository(ctx context.Context, canonicalName reference.Nam }, nil } +func (reg *registry) Blobs() distribution.BlobEnumerator { + return reg.blobStore +} + +func (reg *registry) BlobStatter() distribution.BlobStatter { + return reg.statter +} + // repository provides name-scoped access to various services. type repository struct { *registry @@ -180,6 +188,8 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M blobLinkPath, } + manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + blobStore := &linkedBlobStore{ ctx: ctx, blobStore: repo.blobStore, @@ -193,7 +203,8 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M // TODO(stevvooe): linkPath limits this blob store to only // manifests. This instance cannot be used for blob checks. 
- linkPathFns: manifestLinkPathFns, + linkPathFns: manifestLinkPathFns, + linkDirectoryPathSpec: manifestDirectoryPathSpec, } ms := &manifestStore{ diff --git a/docs/storage/vacuum.go b/docs/storage/vacuum.go index 60d5a2fa..3bdfebf2 100644 --- a/docs/storage/vacuum.go +++ b/docs/storage/vacuum.go @@ -34,11 +34,13 @@ func (v Vacuum) RemoveBlob(dgst string) error { return err } - blobPath, err := pathFor(blobDataPathSpec{digest: d}) + blobPath, err := pathFor(blobPathSpec{digest: d}) if err != nil { return err } + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + err = v.driver.Delete(v.ctx, blobPath) if err != nil { return err From 396a73deb761d077b4dee1947a3368d3dde9d00b Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Fri, 22 Jan 2016 14:40:21 +0000 Subject: [PATCH 419/501] StorageDriver: GCS: remove support for directory Moves The Move operation is only used to move uploaded blobs to their final destination. There is no point in implementing Move on "folders". Apart from simplifying the code, this also saves an HTTP request. Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 51 +++++++---------------------- docs/storage/driver/gcs/gcs_test.go | 37 +++++++++++++++++++++ 2 files changed, 49 insertions(+), 39 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 0e3480f2..c83223cd 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -38,6 +38,8 @@ import ( "google.golang.org/cloud" "google.golang.org/cloud/storage" + "github.com/Sirupsen/logrus" + ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" @@ -469,43 +471,8 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the // original object. 
func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error {
-	prefix := d.pathToDirKey(sourcePath)
 	gcsContext := d.context(context)
-	keys, err := d.listAll(gcsContext, prefix)
-	if err != nil {
-		return err
-	}
-	if len(keys) > 0 {
-		destPrefix := d.pathToDirKey(destPath)
-		copies := make([]string, 0, len(keys))
-		sort.Strings(keys)
-		var err error
-		for _, key := range keys {
-			dest := destPrefix + key[len(prefix):]
-			_, err = storageCopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil)
-			if err == nil {
-				copies = append(copies, dest)
-			} else {
-				break
-			}
-		}
-		// if an error occurred, attempt to cleanup the copies made
-		if err != nil {
-			for i := len(copies) - 1; i >= 0; i-- {
-				_ = storageDeleteObject(gcsContext, d.bucket, copies[i])
-			}
-			return err
-		}
-		// delete originals
-		for i := len(keys) - 1; i >= 0; i-- {
-			err2 := storageDeleteObject(gcsContext, d.bucket, keys[i])
-			if err2 != nil {
-				err = err2
-			}
-		}
-		return err
-	}
-	_, err = storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil)
+	_, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil)
 	if err != nil {
 		if status := err.(*googleapi.Error); status != nil {
 			if status.Code == http.StatusNotFound {
@@ -514,7 +481,13 @@ func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) e
 		}
 		return err
 	}
-	return storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath))
+	err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath))
+	// if deleting the file fails, log the error, but do not fail; the file was successfully copied,
+	// and the original should eventually be cleaned when purging the uploads folder.
+	if err != nil {
+		logrus.Infof("error deleting file: %v due to %v", sourcePath, err)
+	}
+	return nil
 }
 
 // listAll recursively lists all names of objects stored at "prefix" and its subpaths.
@@ -530,8 +503,8 @@ func (d *driver) listAll(context context.Context, prefix string) ([]string, erro
 	}
 	for _, obj := range objects.Results {
 		// GCS does not guarantee strong consistency between
-		// DELETE and LIST operationsCheck that the object is not deleted,
-		// so filter out any objects with a non-zero time-deleted
+		// DELETE and LIST operations. Check that the object is not deleted,
+		// and filter out any objects with a non-zero time-deleted
 		if obj.Deleted.IsZero() {
 			list = append(list, obj.Name)
 		}
diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go
index 554d95e4..7059b953 100644
--- a/docs/storage/driver/gcs/gcs_test.go
+++ b/docs/storage/driver/gcs/gcs_test.go
@@ -175,3 +175,40 @@ func TestEmptyRootList(t *testing.T) {
 		}
 	}
 }
+
+// TestMoveDirectory checks that moving a directory returns an error.
+func TestMoveDirectory(t *testing.T) {
+	if skipGCS() != "" {
+		t.Skip(skipGCS())
+	}
+
+	validRoot, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		t.Fatalf("unexpected error creating temporary directory: %v", err)
+	}
+	defer os.Remove(validRoot)
+
+	driver, err := gcsDriverConstructor(validRoot)
+	if err != nil {
+		t.Fatalf("unexpected error creating rooted driver: %v", err)
+	}
+
+	ctx := ctx.Background()
+	contents := []byte("contents")
+	// Create a regular file.
+ err = driver.PutContent(ctx, "/parent/dir/foo", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := driver.Delete(ctx, "/parent") + if err != nil { + t.Fatalf("failed to remove /parent due to %v\n", err) + } + }() + + err = driver.Move(ctx, "/parent/dir", "/parent/other") + if err == nil { + t.Fatalf("Moving directory /parent/dir /parent/other should have return a non-nil error\n") + } +} From f49bf18768097d37bb7608725290d43e02be95ce Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 4 Mar 2016 00:34:17 -0800 Subject: [PATCH 420/501] Fetch token by credentials and refresh token Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/session.go | 238 ++++++++++++++++++++++--------- docs/client/auth/session_test.go | 177 ++++++++++++++++++++++- docs/proxy/proxyauth.go | 7 + 3 files changed, 348 insertions(+), 74 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index a9b228c5..bd2d16bd 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -36,6 +36,14 @@ type AuthenticationHandler interface { type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) } // NewAuthorizer creates an authorizer which can handle multiple authentication @@ -196,95 +204,73 @@ func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes } now := th.clock.Now() if now.After(th.tokenExpiration) || addedScopes { - tr, err := th.fetchToken(params) + token, expiration, err := th.fetchToken(params) if err != nil { return err } - th.tokenCache = tr.Token - th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } } return nil } -type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` } -func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { - realm, ok := params["realm"] - if !ok { - return nil, errors.New("no realm specified for token auth challenge") +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + // TODO: Make this configurable + form.Set("client_id", "docker") + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", 
time.Time{}, fmt.Errorf("no supported grant type") } - // TODO(dmcgowan): Handle empty scheme - - realmURL, err := url.Parse(realm) + resp, err := th.client().PostForm(realm.String(), form) if err != nil { - return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - req, err := http.NewRequest("GET", realmURL.String(), nil) - if err != nil { - return nil, err - } - - reqParams := req.URL.Query() - service := params["service"] - scope := th.scope.String() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) - } - - for scope := range th.additionalScopes { - reqParams.Add("scope", scope) - } - - if th.creds != nil { - username, password := th.creds.Basic(realmURL) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return nil, err + return "", time.Time{}, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { err := client.HandleErrorResponse(resp) - return nil, err + return "", time.Time{}, err } decoder := json.NewDecoder(resp.Body) - tr := new(tokenResponse) - if err = decoder.Decode(tr); err != nil { - return nil, fmt.Errorf("unable to decode token response: %s", err) + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) } - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return nil, errors.New("authorization server did not include a token in the response") + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } if tr.ExpiresIn < minimumTokenLifetimeSeconds { @@ -295,10 +281,128 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now() + tr.IssuedAt = th.clock.Now().UTC() } - return tr, nil + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) + if err != nil { + return "", time.Time{}, err + } + + reqParams := req.URL.Query() + + if service != "" { + reqParams.Add("service", service) + } + + for _, scope := range scopes { + reqParams.Add("scope", scope) + } + + if th.creds != nil { + username, password := th.creds.Basic(realm) + if username != "" && password != "" { + reqParams.Add("account", username) + req.SetBasicAuth(username, password) + } + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := th.client().Do(req) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return "", time.Time{}, errors.New("authorization server did not include a token in the response") + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. 
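The basic-auth path boils down to a GET with service and scope query parameters plus optional credentials. A minimal sketch of that request construction, with the parameter names taken from the handler above:

import (
	"net/http"
	"net/url"
)

// tokenRequest builds the GET used by the basic-auth flow: scopes and
// service travel as query parameters, and credentials, when present,
// are sent as HTTP basic auth alongside an account hint.
func tokenRequest(realm *url.URL, service string, scopes []string, username, password string) (*http.Request, error) {
	req, err := http.NewRequest("GET", realm.String(), nil)
	if err != nil {
		return nil, err
	}
	q := req.URL.Query()
	if service != "" {
		q.Add("service", service)
	}
	for _, scope := range scopes {
		q.Add("scope", scope)
	}
	if username != "" && password != "" {
		q.Add("account", username)
		req.SetBasicAuth(username, password)
	}
	req.URL.RawQuery = q.Encode()
	return req, nil
}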
+ tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + scopes := make([]string, 0, 1+len(th.additionalScopes)) + if len(th.scope.Actions) > 0 { + scopes = append(scopes, th.scope.String()) + } + for scope := range th.additionalScopes { + scopes = append(scopes, scope) + } + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + // TODO(dmcgowan): define parameter to force oauth with password + if refreshToken != "" { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) + } + + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) } type basicHandler struct { diff --git a/docs/client/auth/session_test.go b/docs/client/auth/session_test.go index f1686942..3b1c0b80 100644 --- a/docs/client/auth/session_test.go +++ b/docs/client/auth/session_test.go @@ -80,14 +80,25 @@ func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersio } type testCredentialStore struct { - username string - password string + username string + password string + refreshTokens map[string]string } func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { return tcs.username, tcs.password } +func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string { + return tcs.refreshTokens[service] +} + +func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) { + if tcs.refreshTokens != nil { + tcs.refreshTokens[service] = token + } +} + func TestEndpointAuthorizeToken(t *testing.T) { service := "localhost.localdomain" repo1 := "some/registry" @@ -162,14 +173,11 @@ func TestEndpointAuthorizeToken(t *testing.T) { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } - badCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e2, c2 := testServerWithAuth(m, authenicate, badCheck) + e2, c2 := testServerWithAuth(m, authenicate, validCheck) defer c2() challengeManager2 := NewSimpleChallengeManager() - versions, err = ping(challengeManager2, e+"/v2/", "x-multi-api-version") + versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") if err != nil { t.Fatal(err) } @@ -199,6 +207,161 @@ func TestEndpointAuthorizeToken(t *testing.T) { } } +func TestEndpointAuthorizeRefreshToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + refreshToken1 := "0123456790abcdef" + refreshToken2 := "0123456790fedcba" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=docker&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: 
[]byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + { + // In the future this test may fail and require using basic auth to get a different refresh token + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=docker&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)), + }, + }, + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=docker&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := NewSimpleChallengeManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + creds := &testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push"))) + client := &http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + // Try with refresh token setting + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := NewSimpleChallengeManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } + + if creds.refreshTokens[service] 
!= refreshToken2 { + t.Fatalf("Refresh token not set after change") + } + + // Try with bad token + e3, c3 := testServerWithAuth(m, authenicate, validCheck) + defer c3() + + challengeManager3 := NewSimpleChallengeManager() + versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client3 := &http.Client{Transport: transport3} + + req, _ = http.NewRequest("GET", e3+"/v2/hello", nil) + resp, err = client3.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + func basicAuth(username, password string) string { auth := username + ":" + password return base64.StdEncoding.EncodeToString([]byte(auth)) diff --git a/docs/proxy/proxyauth.go b/docs/proxy/proxyauth.go index 6f0eb005..a9cc43a6 100644 --- a/docs/proxy/proxyauth.go +++ b/docs/proxy/proxyauth.go @@ -25,6 +25,13 @@ func (c credentials) Basic(u *url.URL) (string, string) { return up.username, up.password } +func (c credentials) RefreshToken(u *url.URL, service string) string { + return "" +} + +func (c credentials) SetRefreshToken(u *url.URL, service, token string) { +} + // configureAuth stores credentials for challenge responses func configureAuth(username, password string) (auth.CredentialStore, error) { creds := map[string]userpass{ From 6a6c22e2b9412502e98bcd3fd54e53c6a90c6ae2 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 4 Mar 2016 11:32:48 -0800 Subject: [PATCH 421/501] Add options struct to initialize handler Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/session.go | 117 +++++++++++++++++++------------ docs/client/auth/session_test.go | 35 +++++++-- 2 files changed, 101 insertions(+), 51 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index bd2d16bd..35ccabf1 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -113,27 +113,45 @@ type clock interface { type tokenHandler struct { header http.Header creds CredentialStore - scope tokenScope transport http.RoundTripper clock clock + forceOAuth bool + clientID string + scopes []Scope + tokenLock sync.Mutex tokenCache string tokenExpiration time.Time - - additionalScopes map[string]struct{} } -// tokenScope represents the scope at which a token will be requested. -// This represents a specific action on a registry resource. -type tokenScope struct { - Resource string - Scope string - Actions []string +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string } -func (ts tokenScope) String() string { - return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +// RepositoryScope represents a token scope for access +// to a repository. 
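A usage sketch of the RepositoryScope type defined just below; the repository name is illustrative:

s := RepositoryScope{
	Repository: "library/busybox",
	Actions:    []string{"pull", "push"},
}
fmt.Println(s.String()) // prints: repository:library/busybox:pull,push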
+type RepositoryScope struct { + Repository string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + ForceOAuth bool + ClientID string + Scopes []Scope } // An implementation of clock for providing real time data. @@ -145,22 +163,32 @@ func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - return newTokenHandler(transport, creds, realClock{}, scope, actions...) + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) } -// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. -func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { - return &tokenHandler{ - transport: transport, - creds: creds, - clock: c, - scope: tokenScope{ - Resource: "repository", - Scope: scope, - Actions: actions, - }, - additionalScopes: map[string]struct{}{}, +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. +func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, } + + return handler } func (th *tokenHandler) client() *http.Client { @@ -177,10 +205,9 @@ func (th *tokenHandler) Scheme() string { func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { var additionalScopes []string if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, tokenScope{ - Resource: "repository", - Scope: fromParam, - Actions: []string{"pull"}, + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, }.String()) } if err := th.refreshToken(params, additionalScopes...); err != nil { @@ -195,16 +222,19 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } var addedScopes bool for _, scope := range additionalScopes { - if _, ok := th.additionalScopes[scope]; !ok { - th.additionalScopes[scope] = struct{}{} - addedScopes = true - } + scopes = append(scopes, scope) + addedScopes = true } + now := th.clock.Now() if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params) + token, expiration, err := th.fetchToken(params, scopes) if err != nil { return err } @@ -232,8 +262,12 @@ func 
(th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic form.Set("scope", strings.Join(scopes, " ")) form.Set("service", service) - // TODO: Make this configurable - form.Set("client_id", "docker") + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = "registry-client" + } + form.Set("client_id", clientID) if refreshToken != "" { form.Set("grant_type", "refresh_token") @@ -369,7 +403,7 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil } -func (th *tokenHandler) fetchToken(params map[string]string) (token string, expiration time.Time, err error) { +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { realm, ok := params["realm"] if !ok { return "", time.Time{}, errors.New("no realm specified for token auth challenge") @@ -383,22 +417,13 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token string, expi service := params["service"] - scopes := make([]string, 0, 1+len(th.additionalScopes)) - if len(th.scope.Actions) > 0 { - scopes = append(scopes, th.scope.String()) - } - for scope := range th.additionalScopes { - scopes = append(scopes, scope) - } - var refreshToken string if th.creds != nil { refreshToken = th.creds.RefreshToken(realmURL, service) } - // TODO(dmcgowan): define parameter to force oauth with password - if refreshToken != "" { + if refreshToken != "" || th.forceOAuth { return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) } diff --git a/docs/client/auth/session_test.go b/docs/client/auth/session_test.go index 3b1c0b80..96c62990 100644 --- a/docs/client/auth/session_test.go +++ b/docs/client/auth/session_test.go @@ -220,7 +220,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) { Request: testutil.Request{ Method: "POST", Route: "/token", - Body: []byte(fmt.Sprintf("client_id=docker&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -232,7 +232,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) { Request: testutil.Request{ Method: "POST", Route: "/token", - Body: []byte(fmt.Sprintf("client_id=docker&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -243,7 +243,7 @@ func TestEndpointAuthorizeRefreshToken(t *testing.T) { Request: testutil.Request{ Method: "POST", Route: "/token", - Body: []byte(fmt.Sprintf("client_id=docker&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -542,7 +542,19 @@ func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { t.Fatal(err) } clock := &fakeClock{current: 
time.Now()} - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} // First call should result in a token exchange @@ -680,7 +692,20 @@ func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { if err != nil { t.Fatal(err) } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} // First call should result in a token exchange From d6a1778282213ffc9ecdebe8ec985a457b492527 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 4 Mar 2016 13:53:06 -0800 Subject: [PATCH 422/501] Add post token implementation Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/auth/auth.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/auth/auth.go b/docs/auth/auth.go index 0164246c..0cb37235 100644 --- a/docs/auth/auth.go +++ b/docs/auth/auth.go @@ -54,7 +54,7 @@ var ( // ErrInvalidCredential is returned when the auth token does not authenticate correctly. ErrInvalidCredential = errors.New("invalid authorization credential") - // ErrAuthenticationFailure returned when authentication failure to be presented to agent. + // ErrAuthenticationFailure returned when authentication fails. ErrAuthenticationFailure = errors.New("authentication failure") ) @@ -106,7 +106,7 @@ type AccessController interface { Authorized(ctx context.Context, access ...Access) (context.Context, error) } -// CredentialAuthenticator is an object which is able to validate credentials +// CredentialAuthenticator is an object which is able to authenticate credentials type CredentialAuthenticator interface { AuthenticateUser(username, password string) error } From e0420f4045facaed733b5d0685320db7f8f11c9f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 4 Mar 2016 14:32:51 -0800 Subject: [PATCH 423/501] Add offline token option Login needs to add an offline token flag to ensure a refresh token is returned by the token endpoint. 
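A usage sketch of what that looks like with the options added here, assuming a credential store creds; OfflineAccess makes the basic-auth token request carry offline_token=true so the server includes a refresh token:

handler := NewTokenHandlerWithOptions(TokenHandlerOptions{
	Credentials:   creds,
	OfflineAccess: true,
	Scopes: []Scope{
		RepositoryScope{
			Repository: "some/repo",
			Actions:    []string{"pull"},
		},
	},
})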
Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/session.go | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index 35ccabf1..b2811351 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -116,9 +116,10 @@ type tokenHandler struct { transport http.RoundTripper clock clock - forceOAuth bool - clientID string - scopes []Scope + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope tokenLock sync.Mutex tokenCache string @@ -149,9 +150,10 @@ type TokenHandlerOptions struct { Transport http.RoundTripper Credentials CredentialStore - ForceOAuth bool - ClientID string - Scopes []Scope + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope } // An implementation of clock for providing real time data. @@ -180,12 +182,13 @@ func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope s // options structure. func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, } return handler @@ -346,6 +349,10 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, reqParams.Add("scope", scope) } + if th.offlineAccess { + reqParams.Add("offline_token", "true") + } + if th.creds != nil { username, password := th.creds.Basic(realm) if username != "" && password != "" { From c536ae90a8f7ea43ce191096f335afe3fa370fa5 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 4 Mar 2016 15:13:27 -0800 Subject: [PATCH 424/501] Fix oauth cross repository push Cross repository push tokens were not being cached and could not be used, now any returned token will be used and the caching is hidden in the getToken function. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/session.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index b2811351..3f6e9164 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -213,16 +213,18 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st Actions: []string{"pull"}, }.String()) } - if err := th.refreshToken(params, additionalScopes...); err != nil { + + token, err := th.getToken(params, additionalScopes...) 
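The caching rule getToken applies is easy to state: serve from cache while the token is valid, refetch when it has expired or extra scopes were requested, and only update the cache for the base scope set. A condensed sketch of that rule:

import "time"

type tokenCache struct {
	token      string
	expiration time.Time
}

// get returns a usable token. Added-scope (cross-repository) fetches
// are returned to the caller but never cached, so they cannot evict
// the base token.
func (c *tokenCache) get(now time.Time, addedScopes bool, fetch func() (string, time.Time, error)) (string, error) {
	if now.After(c.expiration) || addedScopes {
		token, expiration, err := fetch()
		if err != nil {
			return "", err
		}
		if !addedScopes {
			c.token, c.expiration = token, expiration
		}
		return token, nil
	}
	return c.token, nil
}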
+ if err != nil { return err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return nil } -func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { th.tokenLock.Lock() defer th.tokenLock.Unlock() scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) @@ -239,7 +241,7 @@ func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes if now.After(th.tokenExpiration) || addedScopes { token, expiration, err := th.fetchToken(params, scopes) if err != nil { - return err + return "", err } // do not update cache for added scope tokens @@ -247,9 +249,11 @@ func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes th.tokenCache = token th.tokenExpiration = expiration } + + return token, nil } - return nil + return th.tokenCache, nil } type postTokenResponse struct { From 2494c28e1f590caacfaeb203c8b17deed2dd31d1 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 7 Mar 2016 11:50:46 -0800 Subject: [PATCH 425/501] [driver/s3aws] Update s3aws driver parameter parsing to match s3goamz Mirrors changes from #1414 into the newer driver Signed-off-by: Brian Bland --- docs/storage/driver/s3-aws/s3.go | 96 +++++++++++++++++++------------- 1 file changed, 56 insertions(+), 40 deletions(-) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index af62d3f0..0e113680 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -129,17 +129,17 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // Providing no values for these is valid in case the user is authenticating // with an IAM on an ec2 instance (in which case the instance credentials will // be summoned when GetAuth is called) - accessKey, ok := parameters["accesskey"] - if !ok { + accessKey := parameters["accesskey"] + if accessKey == nil { accessKey = "" } - secretKey, ok := parameters["secretkey"] - if !ok { + secretKey := parameters["secretkey"] + if secretKey == nil { secretKey = "" } regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { + if regionName == nil || fmt.Sprint(regionName) == "" { return nil, fmt.Errorf("No region parameter provided") } region := fmt.Sprint(regionName) @@ -148,60 +148,76 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("Invalid region provided: %v", region) } - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { return nil, fmt.Errorf("The encrypt parameter should be a boolean") } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") } secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := 
strconv.ParseBool(secure) + if err != nil { return nil, fmt.Errorf("The secure parameter should be a boolean") } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") } chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - default: - return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + chunkSizeParam := parameters["chunksize"] + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) } - rootDirectory, ok := parameters["rootdirectory"] - if !ok { + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { rootDirectory = "" } storageClass := s3.StorageClassStandard - storageClassParam, ok := parameters["storageclass"] - if ok { + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { storageClassString, ok := storageClassParam.(string) if !ok { return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) @@ -214,8 +230,8 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { storageClass = storageClassString } - userAgent, ok := parameters["useragent"] - if !ok { + userAgent := parameters["useragent"] + if userAgent == nil { userAgent = "" } From e09891e2cfeac92c324067b6b5209e6ed98b784c Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Fri, 26 Feb 2016 14:18:09 -0800 Subject: [PATCH 426/501] URL parse auth endpoints to normalize hostname to lowercase. It is possible for a middlebox to lowercase the URL at somepoint causing a lookup in the auth challenges table to fail. Lowercase hostname before using as keys to challenge map. 
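The normalization itself is small; a sketch of the lookup-key derivation, lowercasing only the host portion since only the host is case-insensitive:

import (
	"net/url"
	"strings"
)

// challengeKey maps an endpoint to its challenge-table key; the URL is
// taken by value so the caller's copy is left untouched.
func challengeKey(endpoint url.URL) string {
	endpoint.Host = strings.ToLower(endpoint.Host)
	return endpoint.String()
}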
Signed-off-by: Richard Scothern --- docs/client/auth/authchallenge.go | 11 ++++--- docs/client/auth/authchallenge_test.go | 43 ++++++++++++++++++++++++++ docs/client/auth/session.go | 4 +-- docs/handlers/blobupload.go | 1 + docs/proxy/proxyregistry.go | 19 ++++++------ 5 files changed, 61 insertions(+), 17 deletions(-) diff --git a/docs/client/auth/authchallenge.go b/docs/client/auth/authchallenge.go index a6ad45d8..c8cd83bb 100644 --- a/docs/client/auth/authchallenge.go +++ b/docs/client/auth/authchallenge.go @@ -25,7 +25,7 @@ type Challenge struct { type ChallengeManager interface { // GetChallenges returns the challenges for the given // endpoint URL. - GetChallenges(endpoint string) ([]Challenge, error) + GetChallenges(endpoint url.URL) ([]Challenge, error) // AddResponse adds the response to the challenge // manager. The challenges will be parsed out of @@ -48,8 +48,10 @@ func NewSimpleChallengeManager() ChallengeManager { type simpleChallengeManager map[string][]Challenge -func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { - challenges := m[endpoint] +func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + endpoint.Host = strings.ToLower(endpoint.Host) + + challenges := m[endpoint.String()] return challenges, nil } @@ -60,11 +62,10 @@ func (m simpleChallengeManager) AddResponse(resp *http.Response) error { } urlCopy := url.URL{ Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, + Host: strings.ToLower(resp.Request.URL.Host), Scheme: resp.Request.URL.Scheme, } m[urlCopy.String()] = challenges - return nil } diff --git a/docs/client/auth/authchallenge_test.go b/docs/client/auth/authchallenge_test.go index 9b6a5adc..953ed5b4 100644 --- a/docs/client/auth/authchallenge_test.go +++ b/docs/client/auth/authchallenge_test.go @@ -1,7 +1,10 @@ package auth import ( + "fmt" "net/http" + "net/url" + "strings" "testing" ) @@ -36,3 +39,43 @@ func TestAuthChallengeParse(t *testing.T) { } } + +func TestAuthChallengeNormalization(t *testing.T) { + testAuthChallengeNormalization(t, "reg.EXAMPLE.com") + testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") +} + +func testAuthChallengeNormalization(t *testing.T, host string) { + + scm := NewSimpleChallengeManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + + err = scm.AddResponse(resp) + if err != nil { + t.Fatal(err) + } + + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + c, err := scm.GetChallenges(lowered) + if err != nil { + t.Fatal(err) + } + + if len(c) == 0 { + t.Fatal("Expected challenge for lower-cased-host URL") + } +} diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index a9b228c5..d8ea1f75 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -67,9 +67,7 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { Path: req.URL.Path[:v2Root+4], } - pingEndpoint := ping.String() - - challenges, err := ea.challenges.GetChallenges(pingEndpoint) + challenges, err := ea.challenges.GetChallenges(ping) if err != nil { return err } diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index e2c34d83..f631e4d4 100644 --- 
a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -340,6 +340,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. w.Header().Set("Docker-Upload-UUID", buh.UUID) w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) diff --git a/docs/proxy/proxyregistry.go b/docs/proxy/proxyregistry.go index e25fe783..f0685788 100644 --- a/docs/proxy/proxyregistry.go +++ b/docs/proxy/proxyregistry.go @@ -22,13 +22,13 @@ import ( type proxyingRegistry struct { embedded distribution.Namespace // provides local registry functionality scheduler *scheduler.TTLExpirationScheduler - remoteURL string + remoteURL url.URL authChallenger authChallenger } // NewRegistryPullThroughCache creates a registry acting as a pull through cache func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { - _, err := url.Parse(config.RemoteURL) + remoteURL, err := url.Parse(config.RemoteURL) if err != nil { return nil, err } @@ -99,9 +99,9 @@ func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Name return &proxyingRegistry{ embedded: registry, scheduler: s, - remoteURL: config.RemoteURL, + remoteURL: *remoteURL, authChallenger: &remoteAuthChallenger{ - remoteURL: config.RemoteURL, + remoteURL: *remoteURL, cm: auth.NewSimpleChallengeManager(), cs: cs, }, @@ -131,7 +131,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named return nil, err } - remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) + remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr) if err != nil { return nil, err } @@ -174,7 +174,7 @@ type authChallenger interface { } type remoteAuthChallenger struct { - remoteURL string + remoteURL url.URL sync.Mutex cm auth.ChallengeManager cs auth.CredentialStore @@ -193,8 +193,9 @@ func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error r.Lock() defer r.Unlock() - remoteURL := r.remoteURL + "/v2/" - challenges, err := r.cm.GetChallenges(remoteURL) + remoteURL := r.remoteURL + remoteURL.Path = "/v2/" + challenges, err := r.cm.GetChallenges(r.remoteURL) if err != nil { return err } @@ -204,7 +205,7 @@ func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error } // establish challenge type with upstream - if err := ping(r.cm, remoteURL, challengeHeader); err != nil { + if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { return err } From 5ca3b61609fee5c3a0d4cab19ad0fb5aabd67a4f Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 8 Mar 2016 15:13:24 -0800 Subject: [PATCH 427/501] Fix two misspellings in source code comments Signed-off-by: Aaron Lehmann --- docs/storage/driver/gcs/gcs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 1fa2bca8..9d8a8458 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -482,7 +482,7 @@ func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) e return err } err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) - // if deleting the file fails, log the error, but do not fail; the file was succesfully copied, + // if deleting the file fails, log the error, but do not fail; the file was successfully copied, // and the original should 
eventually be cleaned when purging the uploads folder. if err != nil { logrus.Infof("error deleting file: %v due to %v", sourcePath, err) From c69c8a3286c98d9f072c4c8a4e2eb2fffffaf2ab Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 8 Feb 2016 14:29:21 -0800 Subject: [PATCH 428/501] Adds new storagedriver.FileWriter interface Updates registry storage code to use this for better resumable writes. Implements this interface for the following drivers: + Inmemory + Filesystem + S3 + Azure Signed-off-by: Brian Bland --- docs/client/blob_writer.go | 18 +- docs/handlers/blobupload.go | 49 +- docs/storage/blob_test.go | 11 +- docs/storage/blobwriter.go | 49 +- docs/storage/blobwriter_resumable.go | 45 +- docs/storage/driver/azure/azure.go | 164 ++++- docs/storage/driver/azure/blockblob.go | 24 - docs/storage/driver/azure/blockblob_test.go | 155 ---- docs/storage/driver/azure/blockid.go | 60 -- docs/storage/driver/azure/blockid_test.go | 74 -- docs/storage/driver/azure/randomwriter.go | 208 ------ .../storage/driver/azure/randomwriter_test.go | 339 --------- docs/storage/driver/azure/zerofillwriter.go | 49 -- .../driver/azure/zerofillwriter_test.go | 126 ---- docs/storage/driver/base/base.go | 24 +- docs/storage/driver/filesystem/driver.go | 146 +++- docs/storage/driver/inmemory/driver.go | 126 +++- docs/storage/driver/s3-aws/s3.go | 670 ++++++++---------- docs/storage/driver/s3-goamz/s3.go | 549 ++++++-------- docs/storage/driver/storagedriver.go | 30 +- docs/storage/driver/testsuites/testsuites.go | 247 +++---- docs/storage/filereader.go | 2 +- docs/storage/filewriter.go | 135 ---- docs/storage/filewriter_test.go | 226 ------ docs/storage/linkedblobstore.go | 21 +- 25 files changed, 1059 insertions(+), 2488 deletions(-) delete mode 100644 docs/storage/driver/azure/blockblob.go delete mode 100644 docs/storage/driver/azure/blockblob_test.go delete mode 100644 docs/storage/driver/azure/blockid.go delete mode 100644 docs/storage/driver/azure/blockid_test.go delete mode 100644 docs/storage/driver/azure/randomwriter.go delete mode 100644 docs/storage/driver/azure/randomwriter_test.go delete mode 100644 docs/storage/driver/azure/zerofillwriter.go delete mode 100644 docs/storage/driver/azure/zerofillwriter_test.go delete mode 100644 docs/storage/filewriter.go delete mode 100644 docs/storage/filewriter_test.go diff --git a/docs/client/blob_writer.go b/docs/client/blob_writer.go index 21a018dc..e3ffcb00 100644 --- a/docs/client/blob_writer.go +++ b/docs/client/blob_writer.go @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "time" "github.com/docker/distribution" @@ -104,21 +103,8 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { } -func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { - newOffset := hbu.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset += int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - hbu.offset = newOffset - - return hbu.offset, nil +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset } func (hbu *httpBlobUpload) ID() string { diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index e9f0f513..892393aa 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -4,7 +4,6 @@ import ( "fmt" "net/http" "net/url" - "os" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" @@ -76,28 +75,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } buh.Upload = upload - if 
state.Offset > 0 { - // Seek the blob upload to the correct spot if it's non-zero. - // These error conditions should be rare and demonstrate really - // problems. We basically cancel the upload and tell the client to - // start over. - if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } else if nn != buh.State.Offset { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("seek to wrong offest: %d != %d", nn, buh.State.Offset) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } + if size := upload.Size(); size != buh.State.Offset { + defer upload.Close() + ctxu.GetLogger(ctx).Infof("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) } - return closeResources(handler, buh.Upload) } @@ -239,10 +224,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - size := buh.State.Offset - if offset, err := buh.Upload.Seek(0, os.SEEK_CUR); err == nil { - size = offset - } + size := buh.Upload.Size() desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ Digest: dgst, @@ -308,21 +290,10 @@ func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Re // uploads always start at a 0 offset. This allows disabling resumable push by // always returning a 0 offset on check status. func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - - var offset int64 - if !fresh { - var err error - offset, err = buh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(buh).Errorf("unable get current offset of blob upload: %v", err) - return err - } - } - // TODO(stevvooe): Need a better way to manage the upload state automatically. buh.State.Name = buh.Repository.Named().Name() buh.State.UUID = buh.Upload.ID() - buh.State.Offset = offset + buh.State.Offset = buh.Upload.Size() buh.State.StartedAt = buh.Upload.StartedAt() token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) @@ -341,7 +312,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. 
return err } - endRange := offset + endRange := buh.Upload.Size() if endRange > 0 { endRange = endRange - 1 } diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 1e5b408c..3698a415 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -41,10 +41,7 @@ func TestWriteSeek(t *testing.T) { } contents := []byte{1, 2, 3} blobUpload.Write(contents) - offset, err := blobUpload.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("unexpected error in blobUpload.Seek: %s", err) - } + offset := blobUpload.Size() if offset != int64(len(contents)) { t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) } @@ -113,11 +110,7 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("layer data write incomplete") } - offset, err := blobUpload.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("unexpected error seeking layer upload: %v", err) - } - + offset := blobUpload.Size() if offset != nn { t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) } diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index f2ca7388..7f280d36 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -21,6 +21,7 @@ var ( // layerWriter is used to control the various aspects of resumable // layer upload. It implements the LayerUpload interface. type blobWriter struct { + ctx context.Context blobStore *linkedBlobStore id string @@ -28,9 +29,9 @@ type blobWriter struct { digester digest.Digester written int64 // track the contiguous write - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy - // LayerUpload Interface - fileWriter + fileWriter storagedriver.FileWriter + driver storagedriver.StorageDriver + path string resumableDigestEnabled bool } @@ -51,7 +52,7 @@ func (bw *blobWriter) StartedAt() time.Time { func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { context.GetLogger(ctx).Debug("(*blobWriter).Commit") - if err := bw.fileWriter.Close(); err != nil { + if err := bw.fileWriter.Commit(); err != nil { return distribution.Descriptor{}, err } @@ -84,6 +85,10 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) // the writer and canceling the operation. func (bw *blobWriter) Cancel(ctx context.Context) error { context.GetLogger(ctx).Debug("(*blobWriter).Rollback") + if err := bw.fileWriter.Cancel(); err != nil { + return err + } + if err := bw.removeResources(ctx); err != nil { return err } @@ -92,15 +97,19 @@ func (bw *blobWriter) Cancel(ctx context.Context) error { return nil } +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + func (bw *blobWriter) Write(p []byte) (int, error) { // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. - if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { return 0, err } - n, err := io.MultiWriter(&bw.fileWriter, bw.digester.Hash()).Write(p) + n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) bw.written += int64(n) return n, err @@ -110,21 +119,17 @@ func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { // Ensure that the current write offset matches how many bytes have been // written to the digester. 
If not, we need to update the digest state to // match the current write position. - if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { return 0, err } - nn, err := bw.fileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) + nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) bw.written += nn return nn, err } func (bw *blobWriter) Close() error { - if bw.err != nil { - return bw.err - } - if err := bw.storeHashState(bw.blobStore.ctx); err != nil { return err } @@ -148,8 +153,10 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } } + var size int64 + // Stat the on disk file - if fi, err := bw.fileWriter.driver.Stat(ctx, bw.path); err != nil { + if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // NOTE(stevvooe): We really don't care if the file is @@ -165,23 +172,23 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) } - bw.size = fi.Size() + size = fi.Size() } if desc.Size > 0 { - if desc.Size != bw.size { + if desc.Size != size { return distribution.Descriptor{}, distribution.ErrBlobInvalidLength } } else { // if provided 0 or negative length, we can assume caller doesn't know or // care about length. - desc.Size = bw.size + desc.Size = size } // TODO(stevvooe): This section is very meandering. Need to be broken down // to be a lot more clear. - if err := bw.resumeDigestAt(ctx, bw.size); err == nil { + if err := bw.resumeDigest(ctx); err == nil { canonical = bw.digester.Digest() if canonical.Algorithm() == desc.Digest.Algorithm() { @@ -206,7 +213,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // the same, we don't need to read the data from the backend. This is // because we've written the entire file in the lifecycle of the // current instance. - if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { + if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { canonical = bw.digester.Digest() verified = desc.Digest == canonical } @@ -223,7 +230,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } // Read the file from the backend driver and validate it. 
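When no matching resumable hash state is available, the path above falls back to re-reading the upload from the driver. A sketch of that verification step, using the digest verifier the same way the rest of the codebase does:

import (
	"io"

	"github.com/docker/distribution/digest"
)

// verifyBlob streams the stored upload back through a verifier for the
// expected digest and reports whether the content matches.
func verifyBlob(dgst digest.Digest, r io.Reader) (bool, error) {
	verifier := digest.NewDigestVerifier(dgst)
	if _, err := io.Copy(verifier, r); err != nil {
		return false, err
	}
	return verifier.Verified(), nil
}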
-	fr, err := newFileReader(ctx, bw.fileWriter.driver, bw.path, desc.Size)
+	fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
@@ -357,7 +364,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) {
 	// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
 	try := 1
 	for try <= 5 {
-		_, err := bw.fileWriter.driver.Stat(bw.ctx, bw.path)
+		_, err := bw.driver.Stat(bw.ctx, bw.path)
 		if err == nil {
 			break
 		}
@@ -371,7 +378,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) {
 		}
 	}
 
-	readCloser, err := bw.fileWriter.driver.ReadStream(bw.ctx, bw.path, 0)
+	readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
 	if err != nil {
 		return nil, err
 	}
diff --git a/docs/storage/blobwriter_resumable.go b/docs/storage/blobwriter_resumable.go
index 5ae29c54..ff5482c3 100644
--- a/docs/storage/blobwriter_resumable.go
+++ b/docs/storage/blobwriter_resumable.go
@@ -4,8 +4,6 @@ package storage
 
 import (
 	"fmt"
-	"io"
-	"os"
 	"path"
 	"strconv"
 
@@ -19,24 +17,18 @@ import (
 	_ "github.com/stevvooe/resumable/sha512"
 )
 
-// resumeDigestAt attempts to restore the state of the internal hash function
-// by loading the most recent saved hash state less than or equal to the given
-// offset. Any unhashed bytes remaining less than the given offset are hashed
-// from the content uploaded so far.
-func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
+// resumeDigest attempts to restore the state of the internal hash function
+// by loading the saved hash state whose offset equals the current size of the blob.
+func (bw *blobWriter) resumeDigest(ctx context.Context) error {
 	if !bw.resumableDigestEnabled {
 		return errResumableDigestNotAvailable
 	}
 
-	if offset < 0 {
-		return fmt.Errorf("cannot resume hash at negative offset: %d", offset)
-	}
-
 	h, ok := bw.digester.Hash().(resumable.Hash)
 	if !ok {
 		return errResumableDigestNotAvailable
 	}
-
+	offset := bw.fileWriter.Size()
 	if offset == int64(h.Len()) {
 		// State of digester is already at the requested offset.
 		return nil
 	}
@@ -49,24 +41,12 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
 		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
 	}
 
-	// Find the highest stored hashState with offset less than or equal to
+	// Find the stored hashState with offset exactly equal to
 	// the requested offset.
 	for _, hashState := range hashStates {
 		if hashState.offset == offset {
 			hashStateMatch = hashState
 			break // Found an exact offset match.
-		} else if hashState.offset < offset && hashState.offset > hashStateMatch.offset {
-			// This offset is closer to the requested offset.
-			hashStateMatch = hashState
-		} else if hashState.offset > offset {
-			// Remove any stored hash state with offsets higher than this one
-			// as writes to this resumed hasher will make those invalid. This
-			// is probably okay to skip for now since we don't expect anyone to
-			// use the API in this way. For that reason, we don't treat an
-			// an error here as a fatal error, but only log it.
-			if err := bw.driver.Delete(ctx, hashState.path); err != nil {
-				logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err)
-			}
 		}
 	}
 
@@ -86,20 +66,7 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error {
 
 	// Mind the gap.
 	if gapLen := offset - int64(h.Len()); gapLen > 0 {
-		// Need to read content from the upload to catch up to the desired offset.
- fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) - if err != nil { - return err - } - defer fr.Close() - - if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) - } - - if _, err := io.CopyN(h, fr, gapLen); err != nil { - return err - } + return errResumableDigestNotAvailable } return nil diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index cbb95981..70771375 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -3,6 +3,7 @@ package azure import ( + "bufio" "bytes" "fmt" "io" @@ -26,6 +27,7 @@ const ( paramAccountKey = "accountkey" paramContainer = "container" paramRealm = "realm" + maxChunkSize = 4 * 1024 * 1024 ) type driver struct { @@ -117,18 +119,21 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { return err } - if err := d.client.CreateBlockBlob(d.container, path); err != nil { + writer, err := d.Writer(ctx, path, false) + if err != nil { return err } - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) - return err + defer writer.Close() + _, err = writer.Write(contents) + if err != nil { + return err + } + return writer.Commit() } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if !ok { @@ -153,25 +158,38 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return resp, nil } -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { - if blobExists, err := d.client.BlobExists(d.container, path); err != nil { - return 0, err - } else if !blobExists { - err := d.client.CreateBlockBlob(d.container, path) +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
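Across all of the driver changes in this patch, the shape of the new contract matters more than any single driver. Here is a sketch of the FileWriter interface the drivers now implement, inferred from the calls made throughout this diff (the authoritative definition lives in the storagedriver package and may differ in detail):

	// FileWriter is an append-only handle to a path. Nothing is guaranteed
	// to be visible at the path until Commit returns; Cancel discards what
	// was written.
	type FileWriter interface {
		io.WriteCloser

		// Size returns the number of bytes written to this FileWriter.
		Size() int64

		// Cancel removes any written content from this FileWriter.
		Cancel() error

		// Commit flushes all content written to this FileWriter and makes
		// it available for future reads at the path.
		Commit() error
	}

Callers follow the same pattern everywhere, as the rewritten PutContent implementations show:

	writer, err := d.Writer(ctx, path, false) // false: start fresh, do not append
	if err != nil {
		return err
	}
	defer writer.Close()
	if _, err := writer.Write(contents); err != nil {
		writer.Cancel()
		return err
	}
	return writer.Commit()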
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + blobExists, err := d.client.BlobExists(d.container, path) + if err != nil { + return nil, err + } + var size int64 + if blobExists { + if append { + blobProperties, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + size = blobProperties.ContentLength + } else { + err := d.client.DeleteBlob(d.container, path) + if err != nil { + return nil, err + } + } + } else { + if append { + return nil, storagedriver.PathNotFoundError{Path: path} + } + err := d.client.PutAppendBlob(d.container, path, nil) if err != nil { - return 0, err + return nil, err } } - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - zw := newZeroFillWriter(&bw) - return zw.Write(d.container, path, offset, reader) + return d.newWriter(path, size), nil } // Stat retrieves the FileInfo for the given path, including the current size @@ -236,6 +254,9 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { } list := directDescendants(blobs, path) + if path != "" && len(list) == 0 { + return nil, storagedriver.PathNotFoundError{Path: path} + } return list, nil } @@ -361,6 +382,101 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { } func is404(err error) bool { - e, ok := err.(azure.AzureStorageServiceError) - return ok && e.StatusCode == http.StatusNotFound + statusCodeErr, ok := err.(azure.UnexpectedStatusCodeError) + return ok && statusCodeErr.Got() == http.StatusNotFound +} + +type writer struct { + driver *driver + path string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter { + return &writer{ + driver: d, + path: path, + size: size, + bw: bufio.NewWriterSize(&blockWriter{ + client: d.client, + container: d.container, + path: path, + }, maxChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.bw.Flush() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.client.DeleteBlob(w.driver.container, w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return w.bw.Flush() +} + +type blockWriter struct { + client azure.BlobStorageClient + container string + path string +} + +func (bw *blockWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += maxChunkSize { + chunkSize := maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize]) + if err 
!= nil { + return n, err + } + + n += chunkSize + } + + return n, nil } diff --git a/docs/storage/driver/azure/blockblob.go b/docs/storage/driver/azure/blockblob.go deleted file mode 100644 index 1c1df899..00000000 --- a/docs/storage/driver/azure/blockblob.go +++ /dev/null @@ -1,24 +0,0 @@ -package azure - -import ( - "fmt" - "io" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// azureBlockStorage is adaptor between azure.BlobStorageClient and -// blockStorage interface. -type azureBlockStorage struct { - azure.BlobStorageClient -} - -func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) -} - -func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { - a := azureBlockStorage{} - a.BlobStorageClient = b - return a -} diff --git a/docs/storage/driver/azure/blockblob_test.go b/docs/storage/driver/azure/blockblob_test.go deleted file mode 100644 index 7ce47195..00000000 --- a/docs/storage/driver/azure/blockblob_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -type StorageSimulator struct { - blobs map[string]*BlockBlob -} - -type BlockBlob struct { - blocks map[string]*DataBlock - blockList []string -} - -type DataBlock struct { - data []byte - committed bool -} - -func (s *StorageSimulator) path(container, blob string) string { - return fmt.Sprintf("%s/%s", container, blob) -} - -func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) { - _, ok := s.blobs[s.path(container, blob)] - return ok, nil -} - -func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return nil, fmt.Errorf("blob not found") - } - - var readers []io.Reader - for _, bID := range bb.blockList { - readers = append(readers, bytes.NewReader(bb.blocks[bID].data)) - } - return ioutil.NopCloser(io.MultiReader(readers...)), nil -} - -func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - r, err := s.GetBlob(container, blob) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - return ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil -} - -func (s *StorageSimulator) CreateBlockBlob(container, blob string) error { - path := s.path(container, blob) - bb := &BlockBlob{ - blocks: make(map[string]*DataBlock), - blockList: []string{}, - } - s.blobs[path] = bb - return nil -} - -func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error { - path := s.path(container, blob) - bb, ok := s.blobs[path] - if !ok { - return fmt.Errorf("blob not found") - } - data := make([]byte, len(chunk)) - copy(data, chunk) - bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob - return nil -} - -func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) { - resp := azure.BlockListResponse{} - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return resp, fmt.Errorf("blob not found") - } - - // Iterate committed blocks (in order) - if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { - for _, blockID := range bb.blockList { - b := bb.blocks[blockID] - 
block := azure.BlockResponse{ - Name: blockID, - Size: int64(len(b.data)), - } - resp.CommittedBlocks = append(resp.CommittedBlocks, block) - } - - } - - // Iterate uncommitted blocks (in no order) - if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { - for blockID, b := range bb.blocks { - block := azure.BlockResponse{ - Name: blockID, - Size: int64(len(b.data)), - } - if !b.committed { - resp.UncommittedBlocks = append(resp.UncommittedBlocks, block) - } - } - } - return resp, nil -} - -func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return fmt.Errorf("blob not found") - } - - var blockIDs []string - for _, v := range blocks { - bl, ok := bb.blocks[v.ID] - if !ok { // check if block ID exists - return fmt.Errorf("Block id '%s' not found", v.ID) - } - bl.committed = true - blockIDs = append(blockIDs, v.ID) - } - - // Mark all other blocks uncommitted - for k, b := range bb.blocks { - inList := false - for _, v := range blockIDs { - if k == v { - inList = true - break - } - } - if !inList { - b.committed = false - } - } - - bb.blockList = blockIDs - return nil -} - -func NewStorageSimulator() StorageSimulator { - return StorageSimulator{ - blobs: make(map[string]*BlockBlob), - } -} diff --git a/docs/storage/driver/azure/blockid.go b/docs/storage/driver/azure/blockid.go deleted file mode 100644 index 776c7cd5..00000000 --- a/docs/storage/driver/azure/blockid.go +++ /dev/null @@ -1,60 +0,0 @@ -package azure - -import ( - "encoding/base64" - "fmt" - "math/rand" - "sync" - "time" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -type blockIDGenerator struct { - pool map[string]bool - r *rand.Rand - m sync.Mutex -} - -// Generate returns an unused random block id and adds the generated ID -// to list of used IDs so that the same block name is not used again. -func (b *blockIDGenerator) Generate() string { - b.m.Lock() - defer b.m.Unlock() - - var id string - for { - id = toBlockID(int(b.r.Int())) - if !b.exists(id) { - break - } - } - b.pool[id] = true - return id -} - -func (b *blockIDGenerator) exists(id string) bool { - _, used := b.pool[id] - return used -} - -func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { - b.m.Lock() - defer b.m.Unlock() - - for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { - b.pool[bl.Name] = true - } -} - -func newBlockIDGenerator() *blockIDGenerator { - return &blockIDGenerator{ - pool: make(map[string]bool), - r: rand.New(rand.NewSource(time.Now().UnixNano()))} -} - -// toBlockId converts given integer to base64-encoded block ID of a fixed length. 
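A quick worked example of why the deleted helper below pads before encoding: Azure requires that all block IDs within one blob have the same length, so the integer is zero-padded to a fixed 29 digits before base64 encoding, giving every ID the same 40-character encoded form. For instance:

	s := fmt.Sprintf("%029d", 7)                       // "00000000000000000000000000007"
	id := base64.StdEncoding.EncodeToString([]byte(s)) // always 40 characters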
-func toBlockID(i int) string { - s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs - return base64.StdEncoding.EncodeToString([]byte(s)) -} diff --git a/docs/storage/driver/azure/blockid_test.go b/docs/storage/driver/azure/blockid_test.go deleted file mode 100644 index aab70202..00000000 --- a/docs/storage/driver/azure/blockid_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package azure - -import ( - "math" - "testing" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -func Test_blockIdGenerator(t *testing.T) { - r := newBlockIDGenerator() - - for i := 1; i <= 10; i++ { - if expected := i - 1; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - if id := r.Generate(); id == "" { - t.Fatal("returned empty id") - } - if expected := i; len(r.pool) != expected { - t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) - } - } -} - -func Test_blockIdGenerator_Feed(t *testing.T) { - r := newBlockIDGenerator() - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed empty list - blocks := azure.BlockListResponse{} - r.Feed(blocks) - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed blocks - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - {"2", 2}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed same block IDs with committed/uncommitted place changed - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } -} - -func Test_toBlockId(t *testing.T) { - min := 0 - max := math.MaxInt64 - - if len(toBlockID(min)) != len(toBlockID(max)) { - t.Fatalf("different-sized blockIDs are returned") - } -} diff --git a/docs/storage/driver/azure/randomwriter.go b/docs/storage/driver/azure/randomwriter.go deleted file mode 100644 index f18692d0..00000000 --- a/docs/storage/driver/azure/randomwriter.go +++ /dev/null @@ -1,208 +0,0 @@ -package azure - -import ( - "fmt" - "io" - "io/ioutil" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// blockStorage is the interface required from a block storage service -// client implementation -type blockStorage interface { - CreateBlockBlob(container, blob string) error - GetBlob(container, blob string) (io.ReadCloser, error) - GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) - PutBlock(container, blob, blockID string, chunk []byte) error - GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) - PutBlockList(container, blob string, blocks []azure.Block) error -} - -// randomBlobWriter enables random access semantics on Azure block blobs -// by enabling writing arbitrary length of chunks to arbitrary write offsets -// within the blob. 
Normally, Azure Blob Storage does not support random -// access semantics on block blobs; however, this writer can download, split and -// reupload the overlapping blocks and discards those being overwritten entirely. -type randomBlobWriter struct { - bs blockStorage - blockSize int -} - -func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter { - return randomBlobWriter{bs: bs, blockSize: blockSize} -} - -// WriteBlobAt writes the given chunk to the specified position of an existing blob. -// The offset must be equals to size of the blob or smaller than it. -func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) { - rand := newBlockIDGenerator() - - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - rand.Feed(blocks) // load existing block IDs - - // Check for write offset for existing blob - size := getBlobSize(blocks) - if offset < 0 || offset > size { - return 0, fmt.Errorf("wrong offset for Write: %v", offset) - } - - // Upload the new chunk as blocks - blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand) - if err != nil { - return 0, err - } - - // For non-append operations, existing blocks may need to be splitted - if offset != size { - // Split the block on the left end (if any) - leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand) - if err != nil { - return 0, err - } - blockList = append(leftBlocks, blockList...) - - // Split the block on the right end (if any) - rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand) - if err != nil { - return 0, err - } - blockList = append(blockList, rightBlocks...) - } else { - // Use existing block list - var existingBlocks []azure.Block - for _, v := range blocks.CommittedBlocks { - existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - } - blockList = append(existingBlocks, blockList...) - } - // Put block list - return nn, r.bs.PutBlockList(container, blob, blockList) -} - -func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) { - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - return getBlobSize(blocks), nil -} - -// writeChunkToBlocks writes given chunk to one or multiple blocks within specified -// blob and returns their block representations. Those blocks are not committed, yet -func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) { - var newBlocks []azure.Block - var nn int64 - - // Read chunks of at most size N except the last chunk to - // maximize block size and minimize block count. - buf := make([]byte, r.blockSize) - for { - n, err := io.ReadFull(chunk, buf) - if err == io.EOF { - break - } - nn += int64(n) - data := buf[:n] - blockID := rand.Generate() - if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { - return newBlocks, nn, err - } - newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted}) - } - return newBlocks, nn, nil -} - -// blocksLeftSide returns the blocks that are going to be at the left side of -// the writeOffset: [0, writeOffset) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed. 
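The splitting logic being removed here is easy to misread in diff form. Its core job was boundary arithmetic: keep committed blocks that fall entirely outside the written range, and re-upload a trimmed copy of the one block that each range boundary cuts through. A toy version of that arithmetic, as a hypothetical helper that is not part of the deleted code:

	// splitPoint walks committed block sizes and finds where offset lands:
	// the first `whole` blocks survive untouched, and `within` bytes of the
	// next block must be re-uploaded as a new, shorter block.
	func splitPoint(blockSizes []int64, offset int64) (whole int, within int64) {
		for i, size := range blockSizes {
			if offset < size {
				return i, offset
			}
			offset -= size
		}
		return len(blockSizes), 0
	}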
-func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) { - var left []azure.Block - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return left, err - } - - o := writeOffset - elapsed := int64(0) - for _, v := range bx.CommittedBlocks { - blkSize := int64(v.Size) - if o >= blkSize { // use existing block - left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - o -= blkSize - elapsed += blkSize - } else if o > 0 { // current block needs to be splitted - start := elapsed - size := o - part, err := r.bs.GetSectionReader(container, blob, start, size) - if err != nil { - return left, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return left, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return left, err - } - left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) - break - } - } - return left, nil -} - -// blocksRightSide returns the blocks that are going to be at the right side of -// the written chunk: [writeOffset+size, +inf) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed. -func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { - var right []azure.Block - - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return nil, err - } - - re := writeOffset + chunkSize - 1 // right end of written chunk - var elapsed int64 - for _, v := range bx.CommittedBlocks { - var ( - bs = elapsed // left end of current block - be = elapsed + int64(v.Size) - 1 // right end of current block - ) - - if bs > re { // take the block as is - right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - } else if be > re { // current block needs to be splitted - part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) - if err != nil { - return right, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return right, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return right, err - } - right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) - } - elapsed += int64(v.Size) - } - return right, nil -} - -func getBlobSize(blocks azure.BlockListResponse) int64 { - var n int64 - for _, v := range blocks.CommittedBlocks { - n += int64(v.Size) - } - return n -} diff --git a/docs/storage/driver/azure/randomwriter_test.go b/docs/storage/driver/azure/randomwriter_test.go deleted file mode 100644 index 32c2509e..00000000 --- a/docs/storage/driver/azure/randomwriter_test.go +++ /dev/null @@ -1,339 +0,0 @@ -package azure - -import ( - "bytes" - "io" - "io/ioutil" - "math/rand" - "reflect" - "strings" - "testing" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -func TestRandomWriter_writeChunkToBlocks(t *testing.T) { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 3) - rand := newBlockIDGenerator() - c := []byte("AAABBBCCCD") - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand) - if err != nil { - t.Fatal(err) - } - if expected := int64(len(c)); nn != expected { - t.Fatalf("wrong nn:%v, expected:%v", nn, 
expected) - } - if expected := 4; len(bw) != expected { - t.Fatal("unexpected written block count") - } - - bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll) - if err != nil { - t.Fatal(err) - } - if expected := 0; len(bx.CommittedBlocks) != expected { - t.Fatal("unexpected committed block count") - } - if expected := 4; len(bx.UncommittedBlocks) != expected { - t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx) - } - - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - assertBlobContents(t, r, c) -} - -func TestRandomWriter_blocksLeftSide(t *testing.T) { - blob := "AAAAABBBBBCCC" - cases := []struct { - offset int64 - expectedBlob string - expectedPattern []azure.BlockStatus - }{ - {0, "", []azure.BlockStatus{}}, // write to beginning, discard all - {13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change - {1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1 - {5, "AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block - {6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block - {9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // write just after first block - } - - for _, c := range cases { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 5) - rand := newBlockIDGenerator() - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) - if err != nil { - t.Fatal(err) - } - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - bx, err := rw.blocksLeftSide("a", "b", c.offset, rand) - if err != nil { - t.Fatal(err) - } - - bs := []azure.BlockStatus{} - for _, v := range bx { - bs = append(bs, v.Status) - } - - if !reflect.DeepEqual(bs, c.expectedPattern) { - t.Logf("Committed blocks %v", bw) - t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx) - } - if rw.bs.PutBlockList("a", "b", bx); err != nil { - t.Fatal(err) - } - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - cout, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_blocksRightSide(t *testing.T) { - blob := "AAAAABBBBBCCC" - cases := []struct { - offset int64 - size int64 - expectedBlob string - expectedPattern []azure.BlockStatus - }{ - {0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob - {0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block - {4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block - {1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains - {3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block - {10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite 
first byte of rightmost block - {11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index - {13, 20, "", []azure.BlockStatus{}}, // append to the end - } - - for _, c := range cases { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 5) - rand := newBlockIDGenerator() - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) - if err != nil { - t.Fatal(err) - } - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand) - if err != nil { - t.Fatal(err) - } - - bs := []azure.BlockStatus{} - for _, v := range bx { - bs = append(bs, v.Status) - } - - if !reflect.DeepEqual(bs, c.expectedPattern) { - t.Logf("Committed blocks %v", bw) - t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx) - } - if rw.bs.PutBlockList("a", "b", bx); err != nil { - t.Fatal(err) - } - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - cout, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("For offset %v-size:%v: wrong blob contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_Write_NewBlob(t *testing.T) { - var ( - s = NewStorageSimulator() - rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks - blob = randomContents(1024 * 7) // 7 KB blob - ) - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(blob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if len(bx.CommittedBlocks) != 3 { - t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) - } - - // Replace first 512 bytes - leftChunk := randomContents(512) - blob = append(leftChunk, blob[512:]...) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(leftChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 4; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace last 512 bytes with 1024 bytes - rightChunk := randomContents(1024) - offset := int64(len(blob) - 512) - blob = append(blob[:offset], rightChunk...) 
- if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(rightChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 5; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace 2K-4K (overlaps 2 blocks from L/R) - newChunk := randomContents(1024 * 2) - offset = 1024 * 2 - blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) - if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(newChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 6; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) - } - - // Replace the entire blob - newBlob := randomContents(1024 * 30) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(newBlob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, newBlob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 10; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) - } else if expected, size := int64(1024*30), getBlobSize(bx); size != expected { - t.Fatalf("committed block size does not indicate blob size") - } -} - -func Test_getBlobSize(t *testing.T) { - // with some committed blocks - if expected, size := int64(151), getBlobSize(azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"A", 100}, - {"B", 50}, - {"C", 1}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"D", 200}, - }}); expected != size { - t.Fatalf("wrong blob size: %v, expected: %v", size, expected) - } - - // with no committed blocks - if expected, size := int64(0), getBlobSize(azure.BlockListResponse{ - UncommittedBlocks: []azure.BlockResponse{ - {"A", 100}, - {"B", 50}, - {"C", 1}, - {"D", 200}, - }}); expected != size { - t.Fatalf("wrong blob size: %v, expected: %v", size, expected) - } -} - -func assertBlobContents(t *testing.T, r io.Reader, expected []byte) { - out, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(out, expected) { - t.Fatalf("wrong blob contents. 
size: %v, expected: %v", len(out), len(expected)) - } -} - -func randomContents(length int64) []byte { - b := make([]byte, length) - for i := range b { - b[i] = byte(rand.Intn(2 << 8)) - } - return b -} diff --git a/docs/storage/driver/azure/zerofillwriter.go b/docs/storage/driver/azure/zerofillwriter.go deleted file mode 100644 index 095489d2..00000000 --- a/docs/storage/driver/azure/zerofillwriter.go +++ /dev/null @@ -1,49 +0,0 @@ -package azure - -import ( - "bytes" - "io" -) - -type blockBlobWriter interface { - GetSize(container, blob string) (int64, error) - WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) -} - -// zeroFillWriter enables writing to an offset outside a block blob's size -// by offering the chunk to the underlying writer as a contiguous data with -// the gap in between filled with NUL (zero) bytes. -type zeroFillWriter struct { - blockBlobWriter -} - -func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { - w := zeroFillWriter{} - w.blockBlobWriter = b - return w -} - -// Write writes the given chunk to the specified existing blob even though -// offset is out of blob's size. The gaps are filled with zeros. Returned -// written number count does not include zeros written. -func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { - size, err := z.blockBlobWriter.GetSize(container, blob) - if err != nil { - return 0, err - } - - var reader io.Reader - var zeroPadding int64 - if offset <= size { - reader = chunk - } else { - zeroPadding = offset - size - offset = size // adjust offset to be the append index - zeros := bytes.NewReader(make([]byte, zeroPadding)) - reader = io.MultiReader(zeros, chunk) - } - - nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) - nn -= zeroPadding - return nn, err -} diff --git a/docs/storage/driver/azure/zerofillwriter_test.go b/docs/storage/driver/azure/zerofillwriter_test.go deleted file mode 100644 index 49361791..00000000 --- a/docs/storage/driver/azure/zerofillwriter_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package azure - -import ( - "bytes" - "testing" -) - -func Test_zeroFillWrite_AppendNoGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*1) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024*3 + 512) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(firstChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, secondChunk...)) - } - -} - -func Test_zeroFillWrite_StartWithGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - chunk := randomContents(1024 * 5) - padding := int64(1024*2 + 256) - if nn, err := 
zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(chunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(make([]byte, padding), chunk...)) - } -} - -func Test_zeroFillWrite_AppendWithGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024*3 + 512) - if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - padding := int64(1024 * 4) - if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...)) - } -} - -func Test_zeroFillWrite_LiesWithinSize(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024 * 3) - if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - // in this case, zerofill won't be used - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...)) - } -} diff --git a/docs/storage/driver/base/base.go b/docs/storage/driver/base/base.go index c816d2d6..064bda60 100644 --- a/docs/storage/driver/base/base.go +++ b/docs/storage/driver/base/base.go @@ -102,10 +102,10 @@ func (base *Base) PutContent(ctx context.Context, path string, content []byte) e return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) } -// ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +// Reader wraps Reader of underlying storage driver. 
+func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { ctx, done := context.WithTrace(ctx) - defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) + defer done("%s.Reader(%q, %d)", base.Name(), path, offset) if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} @@ -115,25 +115,21 @@ func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - rc, e := base.StorageDriver.ReadStream(ctx, path, offset) + rc, e := base.StorageDriver.Reader(ctx, path, offset) return rc, base.setDriverName(e) } -// WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { +// Writer wraps Writer of underlying storage driver. +func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { ctx, done := context.WithTrace(ctx) - defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} - } + defer done("%s.Writer(%q, %v)", base.Name(), path, append) if !storagedriver.PathRegexp.MatchString(path) { - return 0, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - i64, e := base.StorageDriver.WriteStream(ctx, path, offset, reader) - return i64, base.setDriverName(e) + writer, e := base.StorageDriver.Writer(ctx, path, append) + return writer, base.setDriverName(e) } // Stat wraps Stat of underlying storage driver. diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index 5b495818..3bbdc637 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -1,6 +1,7 @@ package filesystem import ( + "bufio" "bytes" "fmt" "io" @@ -78,7 +79,7 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) + rc, err := d.Reader(ctx, path, 0) if err != nil { return nil, err } @@ -94,16 +95,22 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { - if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { + writer, err := d.Writer(ctx, subPath, false) + if err != nil { return err } - - return os.Truncate(d.fullPath(subPath), int64(len(contents))) + defer writer.Close() + _, err = io.Copy(writer, bytes.NewReader(contents)) + if err != nil { + writer.Cancel() + return err + } + return writer.Commit() } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { @@ -125,40 +132,36 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return file, nil } -// WriteStream stores the contents of the provided io.Reader at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { - // TODO(stevvooe): This needs to be a requirement. - // if !path.IsAbs(subPath) { - // return fmt.Errorf("absolute path required: %q", subPath) - // } - +func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { fullPath := d.fullPath(subPath) parentDir := path.Dir(fullPath) if err := os.MkdirAll(parentDir, 0777); err != nil { - return 0, err + return nil, err } fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { - // TODO(stevvooe): A few missing conditions in storage driver: - // 1. What if the path is already a directory? - // 2. Should number 1 be exposed explicitly in storagedriver? - // 2. Can this path not exist, even if we create above? - return 0, err - } - defer fp.Close() - - nn, err = fp.Seek(offset, os.SEEK_SET) - if err != nil { - return 0, err + return nil, err } - if nn != offset { - return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) + var offset int64 + + if !append { + err := fp.Truncate(0) + if err != nil { + fp.Close() + return nil, err + } + } else { + n, err := fp.Seek(0, os.SEEK_END) + if err != nil { + fp.Close() + return nil, err + } + offset = int64(n) } - return io.Copy(fp, reader) + return newFileWriter(fp, offset), nil } // Stat retrieves the FileInfo for the given path, including the current size @@ -286,3 +289,88 @@ func (fi fileInfo) ModTime() time.Time { func (fi fileInfo) IsDir() bool { return fi.FileInfo.IsDir() } + +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func newFileWriter(file *os.File, size int64) *fileWriter { + return &fileWriter{ + file: file, + size: size, + bw: bufio.NewWriter(file), + } +} + +func (fw *fileWriter) Write(p []byte) (int, error) { + if fw.closed { + return 0, fmt.Errorf("already closed") + } else if fw.committed { + return 0, fmt.Errorf("already committed") + } else if fw.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := fw.bw.Write(p) + fw.size += int64(n) + return n, err +} + +func (fw *fileWriter) Size() int64 { + return fw.size +} + +func (fw *fileWriter) Close() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + if err := fw.file.Close(); err != nil { + return err + } + fw.closed = true + return nil +} + +func (fw *fileWriter) Cancel() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + fw.cancelled = true + fw.file.Close() + return os.Remove(fw.file.Name()) +} + +func (fw *fileWriter) Commit() error { + if fw.closed { + return fmt.Errorf("already closed") + } else if fw.committed { + return fmt.Errorf("already committed") + } else if fw.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := 
fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + fw.committed = true + return nil +} diff --git a/docs/storage/driver/inmemory/driver.go b/docs/storage/driver/inmemory/driver.go index b5735c0a..eb2fd1cf 100644 --- a/docs/storage/driver/inmemory/driver.go +++ b/docs/storage/driver/inmemory/driver.go @@ -1,7 +1,6 @@ package inmemory import ( - "bytes" "fmt" "io" "io/ioutil" @@ -74,7 +73,7 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() - rc, err := d.ReadStream(ctx, path, 0) + rc, err := d.Reader(ctx, path, 0) if err != nil { return nil, err } @@ -88,7 +87,9 @@ func (d *driver) PutContent(ctx context.Context, p string, contents []byte) erro d.mutex.Lock() defer d.mutex.Unlock() - f, err := d.root.mkfile(p) + normalized := normalize(p) + + f, err := d.root.mkfile(normalized) if err != nil { // TODO(stevvooe): Again, we need to clarify when this is not a // directory in StorageDriver API. @@ -101,9 +102,9 @@ func (d *driver) PutContent(ctx context.Context, p string, contents []byte) erro return nil } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -111,10 +112,10 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } - path = normalize(path) - found := d.root.find(path) + normalized := normalize(path) + found := d.root.find(normalized) - if found.path() != path { + if found.path() != normalized { return nil, storagedriver.PathNotFoundError{Path: path} } @@ -125,46 +126,24 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil } -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { d.mutex.Lock() defer d.mutex.Unlock() - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - normalized := normalize(path) f, err := d.root.mkfile(normalized) if err != nil { - return 0, fmt.Errorf("not a file") + return nil, fmt.Errorf("not a file") } - // Unlock while we are reading from the source, in case we are reading - // from the same mfs instance. This can be fixed by a more granular - // locking model. - d.mutex.Unlock() - d.mutex.RLock() // Take the readlock to block other writers. - var buf bytes.Buffer - - nn, err = buf.ReadFrom(reader) - if err != nil { - // TODO(stevvooe): This condition is odd and we may need to clarify: - // we've read nn bytes from reader but have written nothing to the - // backend. What is the correct return value? 
Really, the caller needs - // to know that the reader has been advanced and reattempting the - // operation is incorrect. - d.mutex.RUnlock() - d.mutex.Lock() - return nn, err + if !append { + f.truncate() } - d.mutex.RUnlock() - d.mutex.Lock() - f.WriteAt(buf.Bytes(), offset) - return nn, err + return d.newWriter(f), nil } // Stat returns info about the provided path. @@ -173,7 +152,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, defer d.mutex.RUnlock() normalized := normalize(path) - found := d.root.find(path) + found := d.root.find(normalized) if found.path() != normalized { return nil, storagedriver.PathNotFoundError{Path: path} @@ -260,3 +239,74 @@ func (d *driver) Delete(ctx context.Context, path string) error { func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod{} } + +type writer struct { + d *driver + f *file + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(f *file) storagedriver.FileWriter { + return &writer{ + d: d, + f: f, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.f.WriteAt(p, int64(len(w.f.data))) +} + +func (w *writer) Size() int64 { + w.d.mutex.RLock() + defer w.d.mutex.RUnlock() + + return int64(len(w.f.data)) +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.d.root.delete(w.f.path()) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return nil +} diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index 0e113680..eb617d73 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -20,10 +20,8 @@ import ( "reflect" "strconv" "strings" - "sync" "time" - "github.com/Sirupsen/logrus" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -103,9 +101,6 @@ type driver struct { Encrypt bool RootDirectory string StorageClass string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream } type baseEmbed struct { @@ -302,11 +297,6 @@ func New(params DriverParameters) (*Driver, error) { Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, StorageClass: params.StorageClass, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) } return &Driver{ @@ -326,7 +316,7 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. 
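The S3 changes that follow are the largest in the patch, so a map helps: the old WriteStream below hand-rolled multipart uploads around an arbitrary offset, while the new writer (at the end of this diff) buffers incoming bytes up to the configured chunk size and turns each full buffer into one multipart part. A condensed sketch of that buffering step; the writer fields are illustrative assumptions, but the SDK calls are the same ones this file already uses:

	func (w *writer) flushPart() error {
		// One buffered chunk becomes one multipart part.
		partNumber := aws.Int64(int64(len(w.parts) + 1))
		resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{
			Bucket:     aws.String(w.driver.Bucket),
			Key:        aws.String(w.key),
			PartNumber: partNumber,
			UploadId:   w.uploadID,
			Body:       bytes.NewReader(w.readyPart),
		})
		if err != nil {
			return err
		}
		w.parts = append(w.parts, &s3.CompletedPart{
			ETag:       resp.ETag,
			PartNumber: partNumber,
		})
		w.readyPart = nil
		return nil
	}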
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - reader, err := d.ReadStream(ctx, path, 0) + reader, err := d.Reader(ctx, path, 0) if err != nil { return nil, err } @@ -347,9 +337,9 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e return parseError(path, err) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { resp, err := d.S3.GetObject(&s3.GetObjectInput{ Bucket: aws.String(d.Bucket), Key: aws.String(d.s3Path(path)), @@ -366,372 +356,52 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return resp.Body, nil } -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - var partNumber int64 = 1 - bytesRead := 0 - var putErrChan chan error - parts := []*s3.CompletedPart{} - done := make(chan struct{}) // stopgap to free up waiting goroutines - - resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - ContentType: d.getContentType(), - ACL: d.getACL(), - ServerSideEncryption: d.getEncryptionMode(), - StorageClass: d.getStorageClass(), - }) - if err != nil { - return 0, err - } - - uploadID := resp.UploadId - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. 
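That dangling-upload worry does not disappear along with WriteStream; it moves into the new writer's Cancel path, which is expected to abort the multipart upload explicitly rather than lean on a deferred cleanup. A sketch, with the same illustrative writer fields as above:

	func (w *writer) Cancel() error {
		if w.closed {
			return fmt.Errorf("already closed")
		} else if w.committed {
			return fmt.Errorf("already committed")
		}
		w.cancelled = true
		// Abort the in-flight multipart upload so no parts linger.
		_, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket:   aws.String(w.driver.Bucket),
			Key:      aws.String(w.key),
			UploadId: w.uploadID,
		})
		return err
	}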
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - _, err := d.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - UploadId: uploadID, - MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: parts, - }, - }) - if err != nil { - // TODO (brianbland): log errors here - d.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - UploadId: uploadID, - }) - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the s3 - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying s3 library should handle it, it doesn't seem to - // be part of the shouldRetry function (see AdRoll/goamz/s3). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - resp, err := d.S3.UploadPart(&s3.UploadPartInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - PartNumber: aws.Int64(partNumber), - UploadId: uploadID, - Body: bytes.NewReader(buf[0 : int64(bytesRead)+from]), - }) - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. - parts = append(parts, &s3.CompletedPart{ - ETag: resp.ETag, - PartNumber: aws.Int64(partNumber), - }) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.S3.HeadObject(&s3.HeadObjectInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
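
Because a FileWriter can be reopened, resuming an interrupted upload becomes a caller-side pattern rather than an offset argument. A sketch, assuming a driver value and a content buffer; Size() reports the bytes the earlier attempt already persisted:

package example

import (
	"bytes"
	"io"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// resumeUpload reopens a pending upload in append mode and writes only the
// suffix that has not been persisted yet. A sketch; the fallthrough to
// Cancel on error is an assumed policy, not mandated by the interface.
func resumeUpload(ctx context.Context, d storagedriver.StorageDriver, path string, content []byte) error {
	w, err := d.Writer(ctx, path, true) // append: pick up the pending upload
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, bytes.NewReader(content[w.Size():])); err != nil {
		w.Cancel() // discard the partial upload on failure
		return err
	}
	if err := w.Commit(); err != nil {
		return err
	}
	return w.Close()
}
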
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), }) if err != nil { - if s3Err, ok := err.(awserr.Error); !ok || s3Err.Code() != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil && resp.ContentLength != nil { - currentLength = *resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - resp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - PartNumber: aws.Int64(partNumber), - UploadId: uploadID, - CopySource: aws.String(d.Bucket + "/" + d.s3Path(path)), - CopySourceRange: aws.String("bytes=0-" + strconv.FormatInt(offset-1, 10)), - }) - if err != nil { - return 0, err - } - - parts = append(parts, &s3.CompletedPart{ - ETag: resp.CopyPartResult.ETag, - PartNumber: aws.Int64(partNumber), - }) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - resp, err := d.S3.UploadPart(&s3.UploadPartInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - PartNumber: aws.Int64(partNumber), - UploadId: uploadID, - Body: bytes.NewReader(d.zeros), - }) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, &s3.CompletedPart{ - ETag: resp.ETag, - PartNumber: aws.Int64(partNumber), - }) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - resp, err := d.S3.UploadPart(&s3.UploadPartInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - PartNumber: aws.Int64(partNumber), - UploadId: uploadID, - Body: bytes.NewReader(buf), - }) - if err != nil { - return totalRead, err - } - - parts = 
append(parts, &s3.CompletedPart{ - ETag: resp.ETag, - PartNumber: aws.Int64(partNumber), - }) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - resp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ - Bucket: aws.String(d.Bucket), - Key: aws.String(d.s3Path(path)), - PartNumber: aws.Int64(partNumber), - UploadId: uploadID, - CopySource: aws.String(d.Bucket + "/" + d.s3Path(path)), - }) - if err != nil { - return 0, err - } - - parts = append(parts, &s3.CompletedPart{ - ETag: resp.CopyPartResult.ETag, - PartNumber: aws.Int64(partNumber), - }) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - + return nil, err } + return d.newWriter(key, *resp.UploadId, nil), nil + } + resp, err := d.S3.ListMultipartUploads(&s3.ListMultipartUploadsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(key), + }) + if err != nil { + return nil, parseError(path, err) } - for { - if err = fromReader(0); err != nil { - return totalRead, err + for _, multi := range resp.Uploads { + if key != *multi.Key { + continue } - - if int64(bytesRead) < d.ChunkSize { - break + resp, err := d.S3.ListParts(&s3.ListPartsInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + UploadId: multi.UploadId, + }) + if err != nil { + return nil, parseError(path, err) } + var multiSize int64 + for _, part := range resp.Parts { + multiSize += *part.Size + } + return d.newWriter(key, *multi.UploadId, resp.Parts), nil } - - return totalRead, nil + return nil, storagedriver.PathNotFoundError{Path: path} } // Stat retrieves the FileInfo for the given path, including the current size @@ -971,12 +641,258 @@ func (d *driver) getStorageClass() *string { return aws.String(d.StorageClass) } -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. 
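
Before the implementation, a runnable toy model of the ready/pending double-buffering that comment describes, with an assumed 4-byte chunk size (the real driver uses megabyte-scale chunks): every part uploaded while data is still flowing is a full chunk, and only the final flush on Close/Commit may fall short.

package main

import "fmt"

// Toy model of the writer's two buffers with an assumed 4-byte chunk size.
// A part is flushed only when both buffers are full, so each uploaded part
// is a full chunk until the final flush on Close/Commit.
func main() {
	const chunkSize = 4
	var ready, pending []byte
	var partSizes []int

	write := func(p []byte) {
		for len(p) > 0 {
			if n := chunkSize - len(ready); n > 0 { // fill readyPart first
				take := min(n, len(p))
				ready, p = append(ready, p[:take]...), p[take:]
				continue
			}
			if n := chunkSize - len(pending); n > 0 { // then pendingPart
				take := min(n, len(p))
				pending, p = append(pending, p[:take]...), p[take:]
				if len(pending) == chunkSize { // both full: flush a part
					partSizes = append(partSizes, len(ready))
					ready, pending = pending, nil
				}
			}
		}
	}

	write([]byte("0123456789"))

	// Close/Commit: a short pending part is folded into readyPart.
	ready = append(ready, pending...)
	partSizes = append(partSizes, len(ready))

	fmt.Println(partSizes) // [4 6]
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
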
+type writer struct { + driver *driver + key string + uploadID string + parts []*s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool } -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) +func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += *part.Size + } + return &writer{ + driver: d, + key: key, + uploadID: uploadID, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { + var completedParts []*s3.CompletedPart + for _, part := range w.parts { + completedParts = append(completedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return 0, err + } + + resp, err := w.driver.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + ContentType: w.driver.getContentType(), + ACL: w.driver.getACL(), + ServerSideEncryption: w.driver.getEncryptionMode(), + StorageClass: w.driver.getStorageClass(), + }) + if err != nil { + return 0, err + } + w.uploadID = *resp.UploadId + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + resp, err := w.driver.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + }) + defer resp.Body.Close() + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart, err = ioutil.ReadAll(resp.Body) + if err != nil { + return 0, err + } + } else { + // Otherwise we can use the old file as the new first part + copyPartResp, err := w.driver.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(w.driver.Bucket), + CopySource: aws.String(w.driver.Bucket + "/" + w.key), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: resp.UploadId, + }) + if err != nil { + return 0, err + } + w.parts = []*s3.Part{ + { + ETag: copyPartResp.CopyPartResult.ETag, + PartNumber: aws.Int64(1), + Size: aws.Int64(w.size), + }, + } + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) 
+ n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + _, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + var completedParts []*s3.CompletedPart + for _, part := range w.parts { + completedParts = append(completedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + _, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) 
+ w.pendingPart = nil + } + + partNumber := aws.Int64(int64(len(w.parts) + 1)) + resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: partNumber, + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(w.readyPart), + }) + if err != nil { + return err + } + w.parts = append(w.parts, &s3.Part{ + ETag: resp.ETag, + PartNumber: partNumber, + Size: aws.Int64(int64(len(w.readyPart))), + }) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil } diff --git a/docs/storage/driver/s3-goamz/s3.go b/docs/storage/driver/s3-goamz/s3.go index 9208965b..aa2d31b7 100644 --- a/docs/storage/driver/s3-goamz/s3.go +++ b/docs/storage/driver/s3-goamz/s3.go @@ -21,10 +21,8 @@ import ( "reflect" "strconv" "strings" - "sync" "time" - "github.com/Sirupsen/logrus" "github.com/docker/goamz/aws" "github.com/docker/goamz/s3" @@ -79,9 +77,6 @@ type driver struct { Encrypt bool RootDirectory string StorageClass s3.StorageClass - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream } type baseEmbed struct { @@ -301,11 +296,6 @@ func New(params DriverParameters) (*Driver, error) { Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, StorageClass: params.StorageClass, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) } return &Driver{ @@ -337,9 +327,9 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") @@ -354,343 +344,37 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return resp.Body, nil } -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []s3.Part{} - var part s3.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
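
Both S3 writers report storagedriver.PathNotFoundError when append is requested but no multipart upload exists for the key. A sketch of caller-side handling, under the assumption that falling back to a fresh upload is the desired policy:

package example

import (
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// openForResume opens a FileWriter in append mode and falls back to a fresh
// upload when the driver reports nothing to resume. The fallback policy is
// an assumption, not part of this patch.
func openForResume(ctx context.Context, d storagedriver.StorageDriver, path string) (storagedriver.FileWriter, error) {
	w, err := d.Writer(ctx, path, true)
	if _, ok := err.(storagedriver.PathNotFoundError); ok {
		w, err = d.Writer(ctx, path, false) // nothing to resume; start fresh
	}
	return w, err
}
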
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") if err != nil { - return 0, err + return nil, parseError(path, err) } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. - defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } + for _, multi := range multis { + if key != multi.Key { + continue } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) + parts, err := multi.ListParts() if err != nil { - return err + return nil, parseError(path, err) } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - + var multiSize int64 + for _, part := range parts { + multiSize += part.Size } - return nil + return d.newWriter(key, multi, parts), nil } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the s3 - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying s3 library should handle it, it doesn't seem to - // be part of the shouldRetry function (see AdRoll/goamz/s3). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. 
- case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part s3.Part - - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the s3 package does not. We may add s3 - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to only triggered when a tcp - // connection to s3 slows to a crawl. If the RequestTimeout - // ends up getting added to the s3 library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *s3.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. - default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) - time.Sleep(backoff) - } - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. - parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.s3Path(path), nil) - if err != nil { - if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, 
offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil + return nil, storagedriver.PathNotFoundError{Path: path} } // Stat retrieves the FileInfo for the given path, including the current size @@ -882,12 +566,181 @@ func (d *driver) getContentType() string { return "application/octet-stream" } -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. 
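
The goamz writer below enforces the same resumability rule as the s3-aws one: a resumed upload whose last part is under S3's minimum part size must be restarted. A runnable toy model of that decision, assuming minChunkSize mirrors S3's 5 MB floor as in these drivers:

package main

import "fmt"

// minChunkSize mirrors S3's 5 MB minimum part size (an assumption drawn
// from the drivers in this patch).
const minChunkSize = 5 << 20

// restartPlan models the choice Write makes when resuming: keep going if
// the last part is full-sized; otherwise re-read the object while it is
// still under one chunk, or server-side copy it as the new first part.
func restartPlan(partSizes []int64, totalSize int64) string {
	if len(partSizes) == 0 || partSizes[len(partSizes)-1] >= minChunkSize {
		return "continue existing multipart upload"
	}
	if totalSize < minChunkSize {
		return "complete upload, GET object, buffer it as readyPart"
	}
	return "complete upload, copy object as part 1 (UploadPartCopy/PutPartCopy)"
}

func main() {
	fmt.Println(restartPlan([]int64{5 << 20, 1 << 20}, 6<<20)) // copy path
	fmt.Println(restartPlan([]int64{1 << 20}, 1<<20))          // re-read path
}
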
+type writer struct { + driver *driver + key string + multi *s3.Multi + parts []s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool } -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) +func (d *driver) newWriter(key string, multi *s3.Multi, parts []s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []s3.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. 
+// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil } diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index 603020f1..2ae9a67e 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -49,15 +49,14 @@ type StorageDriver interface { // This should primarily be used for small objects. PutContent(ctx context.Context, path string, content []byte) error - // ReadStream retrieves an io.ReadCloser for the content stored at "path" + // Reader retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. - ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) + Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) - // WriteStream stores the contents of the provided io.ReadCloser at a - // location designated by the given path. - // May be used to resume writing a stream by providing a nonzero offset. - WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) + // Writer returns a FileWriter which will store the content written to it + // at the location designated by "path" after the call to Commit. + Writer(ctx context.Context, path string, append bool) (FileWriter, error) // Stat retrieves the FileInfo for the given path, including the current // size in bytes and the creation time. @@ -83,6 +82,25 @@ type StorageDriver interface { URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) } +// FileWriter provides an abstraction for an opened writable file-like object in +// the storage backend. The FileWriter must flush all content written to it on +// the call to Close, but is only required to make its content readable on a +// call to Commit. +type FileWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this FileWriter. + Size() int64 + + // Cancel removes any written content from this FileWriter. + Cancel() error + + // Commit flushes all content written to this FileWriter and makes it + // available for future calls to StorageDriver.GetContent and + // StorageDriver.Reader. + Commit() error +} + // PathRegexp is the regular expression which each file path must match. 
A // file path is absolute, beginning with a slash and containing a positive // number of path components separated by slashes, where each component is diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 3ff4e1e6..48d90ed8 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -282,11 +282,19 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { var fileSize int64 = 5 * 1024 * 1024 * 1024 contents := newRandReader(fileSize) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum)) + + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, io.TeeReader(contents, checksum)) c.Assert(err, check.IsNil) c.Assert(written, check.Equals, fileSize) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() @@ -296,9 +304,9 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) } -// TestReadStreamWithOffset tests that the appropriate data is streamed when +// TestReaderWithOffset tests that the appropriate data is streamed when // reading with a given offset. -func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { +func (suite *DriverSuite) TestReaderWithOffset(c *check.C) { filename := randomPath(32) defer suite.deletePath(c, firstPart(filename)) @@ -311,7 +319,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) c.Assert(err, check.IsNil) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() @@ -320,7 +328,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize) + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize) c.Assert(err, check.IsNil) defer reader.Close() @@ -329,7 +337,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2) + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*2) c.Assert(err, check.IsNil) defer reader.Close() @@ -338,7 +346,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { c.Assert(readContents, check.DeepEquals, contentsChunk3) // Ensure we get invalid offest for negative offsets. 
- reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1) + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) @@ -347,7 +355,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { // Read past the end of the content and make sure we get a reader that // returns 0 bytes and io.EOF - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3) + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3) c.Assert(err, check.IsNil) defer reader.Close() @@ -357,7 +365,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { c.Assert(n, check.Equals, 0) // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1) + reader, err = suite.StorageDriver.Reader(suite.ctx, filename, chunkSize*3-1) c.Assert(err, check.IsNil) defer reader.Close() @@ -395,78 +403,51 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) contentsChunk1 := randomContents(chunkSize) contentsChunk2 := randomContents(chunkSize) contentsChunk3 := randomContents(chunkSize) - contentsChunk4 := randomContents(chunkSize) - zeroChunk := make([]byte, int64(chunkSize)) fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contentsChunk1)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) + curSize := writer.Size() + c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) + err = writer.Close() + c.Assert(err, check.IsNil) + + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) + c.Assert(err, check.IsNil) + c.Assert(writer.Size(), check.Equals, curSize) + + nn, err = io.Copy(writer, bytes.NewReader(contentsChunk2)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) + curSize = writer.Size() + c.Assert(curSize, check.Equals, 2*chunkSize) - // Test re-writing the last chunk - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) + err = writer.Close() c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) + c.Assert(writer.Size(), check.Equals, curSize) - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) + nn, err = 
io.Copy(writer, bytes.NewReader(fullContents[curSize:])) + c.Assert(err, check.IsNil) + c.Assert(nn, check.Equals, int64(len(fullContents[curSize:]))) + + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) received, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, fullContents) - - // Writing past size of file extends file (no offset error). We would like - // to write chunk 4 one chunk length past chunk 3. It should be successful - // and the resulting file will be 5 chunks long, with a chunk of all - // zeros. - - fullContents = append(fullContents, zeroChunk...) - fullContents = append(fullContents, contentsChunk4...) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, chunkSize) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) - - received, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(len(received), check.Equals, len(fullContents)) - c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) - c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) - c.Assert(received, check.DeepEquals, fullContents) - - // Ensure that negative offsets return correct error. - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestReadNonexistentStream tests that reading a stream for a nonexistent path @@ -474,12 +455,12 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomPath(32) - _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + _, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) + _, err = suite.StorageDriver.Reader(suite.ctx, filename, 64) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) @@ -800,7 +781,7 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { // TestPutContentMultipleTimes checks that if storage driver can overwrite the content // in the subsequent puts. Validates that PutContent does not have to work -// with an offset like WriteStream does and overwrites the file entirely +// with an offset like Writer does and overwrites the file entirely // rather than writing the data to the [0,len(data)) of the file. 
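
The migrated tests above repeat one pattern: open a Writer, copy, assert the byte count, then Commit and Close. A helper sketch that could live in this file to share it; the name writeAll is an assumption, and it relies on this file's existing imports (bytes, io, gocheck).

// writeAll pushes contents through the new Writer API, asserting each step.
func (suite *DriverSuite) writeAll(c *check.C, filename string, contents []byte) {
	writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false)
	c.Assert(err, check.IsNil)

	nn, err := io.Copy(writer, bytes.NewReader(contents))
	c.Assert(err, check.IsNil)
	c.Assert(nn, check.Equals, int64(len(contents)))

	c.Assert(writer.Commit(), check.IsNil)
	c.Assert(writer.Close(), check.IsNil)
}
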
func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { filename := randomPath(32) @@ -842,7 +823,7 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { readContents := func() { defer wg.Done() offset := rand.Int63n(int64(len(contents))) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) c.Assert(err, check.IsNil) readContents, err := ioutil.ReadAll(reader) @@ -858,7 +839,7 @@ func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { } // TestConcurrentFileStreams checks that multiple *os.File objects can be passed -// in to WriteStream concurrently without hanging. +// in to Writer concurrently without hanging. func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { numStreams := 32 @@ -882,53 +863,54 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { wg.Wait() } +// TODO (brianbland): evaluate the relevancy of this test // TestEventualConsistency checks that if stat says that a file is a certain size, then // you can freely read from the file (this is the only guarantee that the driver needs to provide) -func (suite *DriverSuite) TestEventualConsistency(c *check.C) { - if testing.Short() { - c.Skip("Skipping test in short mode") - } - - filename := randomPath(32) - defer suite.deletePath(c, firstPart(filename)) - - var offset int64 - var misswrites int - var chunkSize int64 = 32 - - for i := 0; i < 1024; i++ { - contents := randomContents(chunkSize) - read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - - // We are most concerned with being able to read data as soon as Stat declares - // it is uploaded. This is the strongest guarantee that some drivers (that guarantee - // at best eventual consistency) absolutely need to provide. - if fi.Size() == offset+chunkSize { - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) - c.Assert(err, check.IsNil) - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) - - reader.Close() - offset += read - } else { - misswrites++ - } - } - - if misswrites > 0 { - c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") - } - - c.Assert(misswrites, check.Not(check.Equals), 1024) -} +// func (suite *DriverSuite) TestEventualConsistency(c *check.C) { +// if testing.Short() { +// c.Skip("Skipping test in short mode") +// } +// +// filename := randomPath(32) +// defer suite.deletePath(c, firstPart(filename)) +// +// var offset int64 +// var misswrites int +// var chunkSize int64 = 32 +// +// for i := 0; i < 1024; i++ { +// contents := randomContents(chunkSize) +// read, err := suite.StorageDriver.Writer(suite.ctx, filename, offset, bytes.NewReader(contents)) +// c.Assert(err, check.IsNil) +// +// fi, err := suite.StorageDriver.Stat(suite.ctx, filename) +// c.Assert(err, check.IsNil) +// +// // We are most concerned with being able to read data as soon as Stat declares +// // it is uploaded. This is the strongest guarantee that some drivers (that guarantee +// // at best eventual consistency) absolutely need to provide. 
+// if fi.Size() == offset+chunkSize { +// reader, err := suite.StorageDriver.Reader(suite.ctx, filename, offset) +// c.Assert(err, check.IsNil) +// +// readContents, err := ioutil.ReadAll(reader) +// c.Assert(err, check.IsNil) +// +// c.Assert(readContents, check.DeepEquals, contents) +// +// reader.Close() +// offset += read +// } else { +// misswrites++ +// } +// } +// +// if misswrites > 0 { +// c.Log("There were " + string(misswrites) + " occurrences of a write not being instantly available.") +// } +// +// c.Assert(misswrites, check.Not(check.Equals), 1024) +// } // BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { @@ -968,22 +950,22 @@ func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { } } -// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files +// BenchmarkStreamEmptyFiles benchmarks Writer/Reader for 0B files func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { suite.benchmarkStreamFiles(c, 0) } -// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files +// BenchmarkStream1KBFiles benchmarks Writer/Reader for 1KB files func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { suite.benchmarkStreamFiles(c, 1024) } -// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files +// BenchmarkStream1MBFiles benchmarks Writer/Reader for 1MB files func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { suite.benchmarkStreamFiles(c, 1024*1024) } -// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files +// BenchmarkStream1GBFiles benchmarks Writer/Reader for 1GB files func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { suite.benchmarkStreamFiles(c, 1024*1024*1024) } @@ -998,11 +980,18 @@ func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { for i := 0; i < c.N; i++ { filename := path.Join(parentDir, randomPath(32)) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + written, err := io.Copy(writer, bytes.NewReader(randomContents(size))) c.Assert(err, check.IsNil) c.Assert(written, check.Equals, size) - rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + rc, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) c.Assert(err, check.IsNil) rc.Close() } @@ -1083,11 +1072,18 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tf.Sync() tf.Seek(0, os.SEEK_SET) - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, tf) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, size) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() @@ -1112,11 +1108,18 @@ func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { defer suite.deletePath(c, firstPart(filename)) - 
nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) + writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) + c.Assert(err, check.IsNil) + nn, err := io.Copy(writer, bytes.NewReader(contents)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contents))) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) + err = writer.Commit() + c.Assert(err, check.IsNil) + err = writer.Close() + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.Reader(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() diff --git a/docs/storage/filereader.go b/docs/storage/filereader.go index b3a5f520..3b06c817 100644 --- a/docs/storage/filereader.go +++ b/docs/storage/filereader.go @@ -119,7 +119,7 @@ func (fr *fileReader) reader() (io.Reader, error) { } // If we don't have a reader, open one up. - rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset) + rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: diff --git a/docs/storage/filewriter.go b/docs/storage/filewriter.go deleted file mode 100644 index 7c68f346..00000000 --- a/docs/storage/filewriter.go +++ /dev/null @@ -1,135 +0,0 @@ -package storage - -import ( - "bytes" - "fmt" - "io" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -// fileWriter implements a remote file writer backed by a storage driver. -type fileWriter struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - - // mutable fields - size int64 // size of the file, aka the current end - offset int64 // offset is the current write offset - err error // terminal error, if set, reader is closed -} - -// fileWriterInterface makes the desired io compliant interface that the -// filewriter should implement. -type fileWriterInterface interface { - io.WriteSeeker - io.ReaderFrom - io.Closer -} - -var _ fileWriterInterface = &fileWriter{} - -// newFileWriter returns a prepared fileWriter for the driver and path. This -// could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*fileWriter, error) { - fw := fileWriter{ - driver: driver, - path: path, - ctx: ctx, - } - - if fi, err := driver.Stat(ctx, path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot write to a directory") - } - - fw.size = fi.Size() - } - - return &fw, nil -} - -// Write writes the buffer p at the current write offset. -func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.ReadFrom(bytes.NewReader(p)) - return int(nn), err -} - -// ReadFrom reads reader r until io.EOF writing the contents at the current -// offset. -func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { - if fw.err != nil { - return 0, fw.err - } - - nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) - - // We should forward the offset, whether or not there was an error. - // Basically, we keep the filewriter in sync with the reader's head. If an - // error is encountered, the whole thing should be retried but we proceed - // from an expected offset, even if the data didn't make it to the - // backend. 
- fw.offset += nn - - if fw.offset > fw.size { - fw.size = fw.offset - } - - return nn, err -} - -// Seek moves the write position do the requested offest based on the whence -// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET. -func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { - if fw.err != nil { - return 0, fw.err - } - - var err error - newOffset := fw.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fw.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - // No problems, set the offset. - fw.offset = newOffset - } - - return fw.offset, err -} - -// Close closes the fileWriter for writing. -// Calling it once is valid and correct and it will -// return a nil error. Calling it subsequent times will -// detect that fw.err has been set and will return the error. -func (fw *fileWriter) Close() error { - if fw.err != nil { - return fw.err - } - - fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) - - return nil -} diff --git a/docs/storage/filewriter_test.go b/docs/storage/filewriter_test.go deleted file mode 100644 index d6782cd4..00000000 --- a/docs/storage/filewriter_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/rand" - "io" - "os" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -// TestSimpleWrite takes the fileWriter through common write operations -// ensuring data integrity. -func TestSimpleWrite(t *testing.T) { - content := make([]byte, 1<<20) - n, err := rand.Read(content) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - - if n != len(content) { - t.Fatalf("random read did't fill buffer") - } - - dgst, err := digest.FromReader(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error digesting random content: %v", err) - } - - driver := inmemory.New() - path := "/random" - ctx := context.Background() - - fw, err := newFileWriter(ctx, driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - n, err = fw.Write(content) - if err != nil { - t.Fatalf("unexpected error writing content: %v", err) - } - - if n != len(content) { - t.Fatalf("unexpected write length: %d != %d", n, len(content)) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check the seek position is equal to the content length - end, err := fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) - } - - // Double the content - doubled := append(content, content...) 
- doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) - if err != nil { - t.Fatalf("unexpected error digesting doubled content: %v", err) - } - - nn, err := fw.ReadFrom(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error doubling content: %v", err) - } - - if nn != int64(len(content)) { - t.Fatalf("writeat was short: %d != %d", n, len(content)) - } - - fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check that Write updated the offset. - end, err = fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(doubled)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(doubled)) - } - - // Now, we copy from one path to another, running the data through the - // fileReader to fileWriter, rather than the driver.Move command to ensure - // everything is working correctly. - fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - fw, err = newFileWriter(ctx, driver, "/copied") - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - nn, err = io.Copy(fw, fr) - if err != nil { - t.Fatalf("unexpected error copying data: %v", err) - } - - if nn != int64(len(doubled)) { - t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) - } - - fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } -} - -func BenchmarkFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - for i := 0; i < b.N; i++ { - // Start basic fileWriter initialization - fw := fileWriter{ - driver: inmemory.New(), - path: "/random", - } - ctx := context.Background() - if fi, err := fw.driver.Stat(ctx, fw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) - } - } else { - if fi.IsDir() { - b.Fatalf("Cannot write to a directory") - } - - fw.size = fi.Size() - } - - randomBytes := make([]byte, 1<<20) - _, err := rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - // End basic file writer initialization - - b.StartTimer() - for j := 0; j < 100; j++ { - fw.Write(randomBytes) - } - b.StopTimer() - } -} - -func BenchmarkfileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - ctx := context.Background() - for i := 0; i < b.N; i++ { - bfw, err := newFileWriter(ctx, inmemory.New(), "/random") - - if err != nil { - b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) - } - - randomBytes := make([]byte, 1<<20) - _, err = rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random 
data: %v", err) - } - - b.StartTimer() - for j := 0; j < 100; j++ { - bfw.Write(randomBytes) - } - b.StopTimer() - } -} diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 76a1c29d..e06f9540 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -179,7 +179,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. return nil, err } - return lbs.newBlobUpload(ctx, uuid, path, startedAt) + return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) } func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -218,7 +218,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution return nil, err } - return lbs.newBlobUpload(ctx, id, path, startedAt) + return lbs.newBlobUpload(ctx, id, path, startedAt, true) } func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { @@ -312,18 +312,21 @@ func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Name } // newBlobUpload allocates a new upload controller with the given state. -func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { - fw, err := newFileWriter(ctx, lbs.driver, path) +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { + fw, err := lbs.driver.Writer(ctx, path, append) if err != nil { return nil, err } bw := &blobWriter{ - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.New(), - fileWriter: *fw, + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + fileWriter: fw, + driver: lbs.driver, + path: path, resumableDigestEnabled: lbs.resumableDigestEnabled, } From 34891eb7ab3bad5edd98fe97cffe66d561afd7ef Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Fri, 12 Feb 2016 17:49:37 +0000 Subject: [PATCH 429/501] StorageDriver: Testsuite: call Close before getting Size Signed-off-by: Arthur Baars --- docs/storage/driver/testsuites/testsuites.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/storage/driver/testsuites/testsuites.go b/docs/storage/driver/testsuites/testsuites.go index 48d90ed8..de8e3143 100644 --- a/docs/storage/driver/testsuites/testsuites.go +++ b/docs/storage/driver/testsuites/testsuites.go @@ -412,12 +412,12 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - curSize := writer.Size() - c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) - err = writer.Close() c.Assert(err, check.IsNil) + curSize := writer.Size() + c.Assert(curSize, check.Equals, int64(len(contentsChunk1))) + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) c.Assert(err, check.IsNil) c.Assert(writer.Size(), check.Equals, curSize) @@ -426,12 +426,12 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - curSize = writer.Size() - c.Assert(curSize, check.Equals, 2*chunkSize) - err = writer.Close() c.Assert(err, check.IsNil) + curSize = writer.Size() + c.Assert(curSize, check.Equals, 2*chunkSize) + writer, err = suite.StorageDriver.Writer(suite.ctx, filename, true) c.Assert(err, check.IsNil) c.Assert(writer.Size(), 
check.Equals, curSize) From 115a6e58034155ae089c6cd65438c1b3e3bbdb3a Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Fri, 12 Feb 2016 13:30:57 +0000 Subject: [PATCH 430/501] Storagedriver: GCS: implement resumable uploads Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 534 +++++++++++++++++++--------- docs/storage/driver/gcs/gcs_test.go | 102 +++++- 2 files changed, 467 insertions(+), 169 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 9d8a8458..14600dee 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -7,11 +7,8 @@ // Because gcs is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // -// Keep in mind that gcs guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. +// Note that the contents of incomplete uploads are not accessible even though +// Stat returns their length // // +build include_gcs @@ -25,7 +22,9 @@ import ( "math/rand" "net/http" "net/url" + "regexp" "sort" + "strconv" "strings" "time" @@ -34,7 +33,6 @@ import ( "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/googleapi" - storageapi "google.golang.org/api/storage/v1" "google.golang.org/cloud" "google.golang.org/cloud/storage" @@ -46,8 +44,18 @@ import ( "github.com/docker/distribution/registry/storage/driver/factory" ) -const driverName = "gcs" -const dummyProjectID = "" +const ( + driverName = "gcs" + dummyProjectID = "" + + uploadSessionContentType = "application/x-docker-upload-session" + minChunkSize = 256 * 1024 + maxChunkSize = 20 * minChunkSize + + maxTries = 5 +) + +var rangeHeader = regexp.MustCompile(`^bytes=([0-9])+-([0-9]+)$`) // driverParameters is a struct that encapsulates all of the driver parameters after all values have been set type driverParameters struct { @@ -155,7 +163,17 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(context, path, 0) + gcsContext := d.context(context) + name := d.pathToKey(path) + var rc io.ReadCloser + err := retry(func() error { + var err error + rc, err = storage.NewReader(gcsContext, d.bucket, name) + return err + }) + if err == storage.ErrObjectNotExist { + return nil, storagedriver.PathNotFoundError{Path: path} + } if err != nil { return nil, err } @@ -171,25 +189,53 @@ func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". // This should primarily be used for small objects. 
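// GetContent above and PutContent below both lean on the reworked retry
// helper, whose hunk later in this patch shows only the new signature, the
// one-second initial backoff, and the maxTries loop bound. The loop body is
// elided there, so the following is a sketch under stated assumptions: the
// backoff doubles each attempt, and only server-side (5xx) googleapi errors
// are retried, which is consistent with TestRetry at the end of this patch.

package example

import (
	"net/http"
	"time"

	"google.golang.org/api/googleapi"
)

const maxTries = 5 // the constant introduced in this patch

func retrySketch(req func() error) error {
	backoff := time.Second
	var err error
	for i := 0; i < maxTries; i++ {
		err = req()
		if err == nil {
			return nil
		}
		status, ok := err.(*googleapi.Error)
		if !ok || status.Code < http.StatusInternalServerError {
			return err // e.g. a 404 goes straight back to the caller
		}
		time.Sleep(backoff)
		backoff *= 2 // assumed growth factor; not shown in the hunk
	}
	return err
}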
func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - defer wc.Close() - _, err := wc.Write(contents) - return err + return retry(func() error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, contents) + }) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" +// Reader retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. -func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { - name := d.pathToKey(path) +func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { + res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) + if err != nil { + if res != nil { + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path)) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + } + return nil, err + } + if res.Header.Get("Content-Type") == uploadSessionContentType { + defer res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + return res.Body, nil +} + +func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) { // copied from google.golang.org/cloud/storage#NewReader : // to set the additional "Range" header u := &url.URL{ Scheme: "https", Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", d.bucket, name), + Path: fmt.Sprintf("/%s/%s", bucket, name), } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { @@ -198,122 +244,253 @@ func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io. if offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) } - res, err := d.client.Do(req) + var res *http.Response + err = retry(func() error { + var err error + res, err = client.Do(req) + return err + }) if err != nil { return nil, err } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, storagedriver.PathNotFoundError{Path: path} + return res, googleapi.CheckMediaResponse(res) +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
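// The writer type introduced below buffers at most one chunk in memory.
// writeChunk only ever sends whole multiples of minChunkSize (256 KiB) to the
// upload session and carries the remainder forward, so Close or Commit is
// normally left with fewer than minChunkSize bytes to persist. A small worked
// example of that arithmetic (illustrative only):

package example

import "fmt"

const minChunkSize = 256 * 1024 // as defined at the top of this file

func main() {
	buffSize := 1<<20 + 12345 // 1 MiB plus a tail sitting in the buffer
	chunkSize := buffSize - (buffSize % minChunkSize)
	fmt.Println(chunkSize / minChunkSize) // 4: four full 256 KiB chunks go out
	fmt.Println(buffSize - chunkSize)     // 12345: bytes held back for the next write
}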
+func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) { + writer := &writer{ + client: d.client, + bucket: d.bucket, + name: d.pathToKey(path), + buffer: make([]byte, maxChunkSize), } - if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { - res.Body.Close() - obj, err := storageStatObject(d.context(context), d.bucket, name) + + if append { + err := writer.init(path) if err != nil { return nil, err } - if offset == int64(obj.Size) { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } - if res.StatusCode < 200 || res.StatusCode > 299 { - res.Body.Close() - return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", d.bucket, name, res.Status) - } - return res.Body, nil + return writer, nil } -// WriteStream stores the contents of the provided io.ReadCloser at a -// location designated by the given path. -// May be used to resume writing a stream by providing a nonzero offset. -// The offset must be no larger than the CurrentSize for this path. -func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } +type writer struct { + client *http.Client + bucket string + name string + size int64 + offset int64 + closed bool + sessionURI string + buffer []byte + buffSize int +} - if offset == 0 { - return d.writeCompletely(context, path, 0, reader) - } - - service, err := storageapi.New(d.client) +// Cancel removes any written content from this FileWriter. +func (w *writer) Cancel() error { + err := w.checkClosed() if err != nil { - return 0, err - } - objService := storageapi.NewObjectsService(service) - var obj *storageapi.Object - err = retry(5, func() error { - o, err := objService.Get(d.bucket, d.pathToKey(path)).Do() - obj = o return err - }) - // obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do) + } + w.closed = true + err = storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) if err != nil { - return 0, err - } - - // cannot append more chunks, so redo from scratch - if obj.ComponentCount >= 1023 { - return d.writeCompletely(context, path, offset, reader) - } - - // skip from reader - objSize := int64(obj.Size) - nn, err := skip(reader, objSize-offset) - if err != nil { - return nn, err - } - - // Size <= offset - partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount) - gcsContext := d.context(context) - wc := storage.NewWriter(gcsContext, d.bucket, partName) - wc.ContentType = "application/octet-stream" - - if objSize < offset { - err = writeZeros(wc, offset-objSize) - if err != nil { - wc.CloseWithError(err) - return nn, err + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + } + return err +} + +func (w *writer) Close() error { + if w.closed { + return nil + } + w.closed = true + + err := w.writeChunk() + if err != nil { + return err + } + + // Copy the remaining bytes from the buffer to the upload session + // Normally buffSize will be smaller than minChunkSize. However, in the + // unlikely event that the upload session failed to start, this number could be higher. 
+	// In this case we can safely clip the remaining bytes to the minChunkSize
+	if w.buffSize > minChunkSize {
+		w.buffSize = minChunkSize
+	}
+
+	// commit the writes by updating the upload session
+	err = retry(func() error {
+		wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
+		wc.ContentType = uploadSessionContentType
+		wc.Metadata = map[string]string{
+			"Session-URI": w.sessionURI,
+			"Offset":      strconv.FormatInt(w.offset, 10),
+		}
+		return putContentsClose(wc, w.buffer[0:w.buffSize])
+	})
+	if err != nil {
+		return err
+	}
+	w.size = w.offset + int64(w.buffSize)
+	w.buffSize = 0
+	return nil
+}
+
+func putContentsClose(wc *storage.Writer, contents []byte) error {
+	size := len(contents)
+	var nn int
+	var err error
+	for nn < size {
+		n, err := wc.Write(contents[nn:size])
+		nn += n
+		if err != nil {
+			break
 		}
 	}
-	n, err := io.Copy(wc, reader)
 	if err != nil {
 		wc.CloseWithError(err)
-		return nn, err
+		return err
+	}
+	return wc.Close()
+}
+
+// Commit flushes all content written to this FileWriter and makes it
+// available for future calls to StorageDriver.GetContent and
+// StorageDriver.Reader.
+func (w *writer) Commit() error {
+
+	if err := w.checkClosed(); err != nil {
+		return err
+	}
+	w.closed = true
+
+	// no session started yet, just perform a simple upload
+	if w.sessionURI == "" {
+		err := retry(func() error {
+			wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name)
+			wc.ContentType = "application/octet-stream"
+			return putContentsClose(wc, w.buffer[0:w.buffSize])
+		})
+		if err != nil {
+			return err
+		}
+		w.size = w.offset + int64(w.buffSize)
+		w.buffSize = 0
+		return nil
+	}
+	size := w.offset + int64(w.buffSize)
+	var nn int
+	// loop must be performed at least once to ensure the file is committed even when
+	// the buffer is empty
+	for {
+		n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size)
+		nn += int(n)
+		w.offset += n
+		w.size = w.offset
+		if err != nil {
+			w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize])
+			return err
+		}
+		if nn == w.buffSize {
+			break
+		}
+	}
+	w.buffSize = 0
+	return nil
+}
+
+func (w *writer) checkClosed() error {
+	if w.closed {
+		return fmt.Errorf("Writer already closed")
+	}
+	return nil
+}
+
+func (w *writer) writeChunk() error {
+	var err error
+	// chunks can be uploaded only in multiples of minChunkSize
+	// chunkSize is a multiple of minChunkSize less than or equal to buffSize
+	chunkSize := w.buffSize - (w.buffSize % minChunkSize)
+	if chunkSize == 0 {
+		return nil
+	}
+	// if there is no sessionURI yet, obtain one by starting the session
+	if w.sessionURI == "" {
+		w.sessionURI, err = startSession(w.client, w.bucket, w.name)
 	}
-	err = wc.Close()
 	if err != nil {
-		return nn, err
+		return err
 	}
-	// wc was closed successfully, so the temporary part exists, schedule it for deletion at the end
-	// of the function
-	defer storageDeleteObject(gcsContext, d.bucket, partName)
+	nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1)
+	w.offset += nn
+	if w.offset > w.size {
+		w.size = w.offset
+	}
+	// shift the remaining bytes to the start of the buffer
+	w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize])
 
-	req := &storageapi.ComposeRequest{
-		Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType},
-		SourceObjects: []*storageapi.ComposeRequestSourceObjects{
-			{
-				Name:       obj.Name,
-				Generation: obj.Generation,
-			}, {
-				Name:       partName,
-				Generation: wc.Object().Generation,
- }}, + return err +} + +func (w *writer) Write(p []byte) (int, error) { + err := w.checkClosed() + if err != nil { + return 0, err } - err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err }) - if err == nil { - nn = nn + n + var nn int + for nn < len(p) { + n := copy(w.buffer[w.buffSize:], p[nn:]) + w.buffSize += n + if w.buffSize == cap(w.buffer) { + err = w.writeChunk() + if err != nil { + break + } + } + nn += n } - return nn, err } +// Size returns the number of bytes written to this FileWriter. +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) init(path string) error { + res, err := getObject(w.client, w.bucket, w.name, 0) + if err != nil { + return err + } + defer res.Body.Close() + if res.Header.Get("Content-Type") != uploadSessionContentType { + return storagedriver.PathNotFoundError{Path: path} + } + offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64) + if err != nil { + return err + } + buffer, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI") + w.buffSize = copy(w.buffer, buffer) + w.offset = offset + w.size = offset + int64(w.buffSize) + return nil +} + type request func() error -func retry(maxTries int, req request) error { +func retry(req request) error { backoff := time.Second var err error for i := 0; i < maxTries; i++ { @@ -335,53 +512,6 @@ func retry(maxTries int, req request) error { return err } -func (d *driver) writeCompletely(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - defer wc.Close() - - // Copy the first offset bytes of the existing contents - // (padded with zeros if needed) into the writer - if offset > 0 { - existing, err := d.ReadStream(context, path, 0) - if err != nil { - return 0, err - } - defer existing.Close() - n, err := io.CopyN(wc, existing, offset) - if err == io.EOF { - err = writeZeros(wc, offset-n) - } - if err != nil { - return 0, err - } - } - return io.Copy(wc, reader) -} - -func skip(reader io.Reader, count int64) (int64, error) { - if count <= 0 { - return 0, nil - } - return io.CopyN(ioutil.Discard, reader, count) -} - -func writeZeros(wc io.Writer, count int64) error { - buf := make([]byte, 32*1024) - for count > 0 { - size := cap(buf) - if int64(size) > count { - size = int(count) - } - n, err := wc.Write(buf[0:size]) - if err != nil { - return err - } - count = count - int64(n) - } - return nil -} - // Stat retrieves the FileInfo for the given path, including the current // size in bytes and the creation time. 
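// Stat below, like List further on, hides the driver's own bookkeeping
// objects: anything stored with uploadSessionContentType is an in-progress
// upload session rather than user content, so it is reported as not found.
// A condensed sketch of the guard, assuming obj was returned by
// storageStatObject as in the hunk that follows:

package example

import (
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"google.golang.org/cloud/storage"
)

const uploadSessionContentType = "application/x-docker-upload-session"

// hideUploadSession maps the session sentinel content type to a
// PathNotFoundError so partially written blobs never surface to callers.
func hideUploadSession(obj *storage.Object, path string) error {
	if obj.ContentType == uploadSessionContentType {
		return storagedriver.PathNotFoundError{Path: path}
	}
	return nil
}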
func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { @@ -390,6 +520,9 @@ func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, gcsContext := d.context(context) obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) if err == nil { + if obj.ContentType == uploadSessionContentType { + return nil, storagedriver.PathNotFoundError{Path: path} + } fi = storagedriver.FileInfoFields{ Path: path, Size: obj.Size, @@ -440,15 +573,10 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { } for _, object := range objects.Results { // GCS does not guarantee strong consistency between - // DELETE and LIST operationsCheck that the object is not deleted, - // so filter out any objects with a non-zero time-deleted - if object.Deleted.IsZero() { - name := object.Name - // Ignore objects with names that end with '#' (these are uploaded parts) - if name[len(name)-1] != '#' { - name = d.keyToPath(name) - list = append(list, name) - } + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType { + list = append(list, d.keyToPath(object.Name)) } } for _, subpath := range objects.Prefixes { @@ -474,7 +602,7 @@ func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) e gcsContext := d.context(context) _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) if err != nil { - if status := err.(*googleapi.Error); status != nil { + if status, ok := err.(*googleapi.Error); ok { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: sourcePath} } @@ -545,7 +673,7 @@ func (d *driver) Delete(context ctx.Context, path string) error { } err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) if err != nil { - if status := err.(*googleapi.Error); status != nil { + if status, ok := err.(*googleapi.Error); ok { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: path} } @@ -555,14 +683,14 @@ func (d *driver) Delete(context ctx.Context, path string) error { } func storageDeleteObject(context context.Context, bucket string, name string) error { - return retry(5, func() error { + return retry(func() error { return storage.DeleteObject(context, bucket, name) }) } func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { var obj *storage.Object - err := retry(5, func() error { + err := retry(func() error { var err error obj, err = storage.StatObject(context, bucket, name) return err @@ -572,7 +700,7 @@ func storageStatObject(context context.Context, bucket string, name string) (*st func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { var objs *storage.Objects - err := retry(5, func() error { + err := retry(func() error { var err error objs, err = storage.ListObjects(context, bucket, q) return err @@ -582,7 +710,7 @@ func storageListObjects(context context.Context, bucket string, q *storage.Query func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { var obj *storage.Object - err := retry(5, func() error { + err := retry(func() error { var err error obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, 
attrs) return err @@ -626,6 +754,80 @@ func (d *driver) URLFor(context ctx.Context, path string, options map[string]int return storage.SignedURL(d.bucket, name, opts) } +func startSession(client *http.Client, bucket string, name string) (uri string, err error) { + u := &url.URL{ + Scheme: "https", + Host: "www.googleapis.com", + Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket), + RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name), + } + err = retry(func() error { + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return err + } + req.Header.Set("X-Upload-Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", "0") + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + uri = resp.Header.Get("Location") + return nil + }) + return uri, err +} + +func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) { + bytesPut := int64(0) + err := retry(func() error { + req, err := http.NewRequest("PUT", sessionURI, bytes.NewReader(chunk)) + if err != nil { + return err + } + length := int64(len(chunk)) + to := from + length - 1 + size := "*" + if totalSize >= 0 { + size = strconv.FormatInt(totalSize, 10) + } + req.Header.Set("Content-Type", "application/octet-stream") + if from == to+1 { + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size)) + } else { + req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size)) + } + req.Header.Set("Content-Length", strconv.FormatInt(length, 10)) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if totalSize < 0 && resp.StatusCode == 308 { + groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range")) + end, err := strconv.ParseInt(groups[2], 10, 64) + if err != nil { + return err + } + bytesPut = end - from + 1 + return nil + } + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + bytesPut = to - from + 1 + return nil + }) + return bytesPut, err +} + func (d *driver) context(context ctx.Context) context.Context { return cloud.WithContext(context, dummyProjectID, d.client) } diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go index 7059b953..4852bf2c 100644 --- a/docs/storage/driver/gcs/gcs_test.go +++ b/docs/storage/driver/gcs/gcs_test.go @@ -85,6 +85,102 @@ func init() { }, skipGCS) } +// Test Committing a FileWriter without having called Write +func TestCommitEmpty(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != 0 { + t.Fatalf("writer.Size: %d != 0", writer.Size()) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + 
t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != 0 { + t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents)) + } +} + +// Test Committing a FileWriter after having written exactly +// defaultChunksize bytes. +func TestCommit(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + contents := make([]byte, defaultChunkSize) + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + _, err = writer.Write(contents) + if err != nil { + t.Fatalf("writer.Write: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != int64(len(contents)) { + t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents)) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != len(contents) { + t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents)) + } +} + func TestRetry(t *testing.T) { if skipGCS() != "" { t.Skip(skipGCS()) @@ -100,7 +196,7 @@ func TestRetry(t *testing.T) { } } - err := retry(2, func() error { + err := retry(func() error { return &googleapi.Error{ Code: 503, Message: "google api error", @@ -108,7 +204,7 @@ func TestRetry(t *testing.T) { }) assertError("googleapi: Error 503: google api error", err) - err = retry(2, func() error { + err = retry(func() error { return &googleapi.Error{ Code: 404, Message: "google api error", @@ -116,7 +212,7 @@ func TestRetry(t *testing.T) { }) assertError("googleapi: Error 404: google api error", err) - err = retry(2, func() error { + err = retry(func() error { return fmt.Errorf("error") }) assertError("error", err) From 9432b18e300e89cdef0d16dc9b8957191f2237e7 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Sun, 14 Feb 2016 18:15:15 +0000 Subject: [PATCH 431/501] Storagedriver: GCS: add chunksize parameter Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 38 ++++++++++++++++++++++++++--- docs/storage/driver/gcs/gcs_test.go | 1 + 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index 14600dee..abe0b9f6 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -22,6 +22,7 @@ import ( "math/rand" "net/http" "net/url" + "reflect" "regexp" "sort" "strconv" @@ -50,7 +51,7 @@ const ( uploadSessionContentType = "application/x-docker-upload-session" minChunkSize = 256 * 1024 - maxChunkSize = 20 * minChunkSize + defaultChunkSize = 20 * minChunkSize maxTries = 5 ) @@ -65,6 +66,7 @@ type driverParameters struct { privateKey []byte client *http.Client rootDirectory string + chunkSize int } func init() { @@ -87,6 +89,7 @@ type driver struct { email string privateKey []byte rootDirectory string + chunkSize int } // FromParameters constructs a new Driver with a given parameters map @@ -103,6 +106,31 @@ func 
FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri
 		rootDirectory = ""
 	}
 
+	chunkSize := defaultChunkSize
+	chunkSizeParam, ok := parameters["chunksize"]
+	if ok {
+		switch v := chunkSizeParam.(type) {
+		case string:
+			vv, err := strconv.Atoi(v)
+			if err != nil {
+				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+			}
+			chunkSize = vv
+		case int, uint, int32, uint32, uint64, int64:
+			chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int())
+		default:
+			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+		}
+
+		if chunkSize < minChunkSize {
+			return nil, fmt.Errorf("the chunksize parameter %#v should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+		}
+
+		if chunkSize%minChunkSize != 0 {
+			return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize)
+		}
+	}
+
 	var ts oauth2.TokenSource
 	jwtConf := new(jwt.Config)
 	if keyfile, ok := parameters["keyfile"]; ok {
@@ -121,7 +149,6 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri
 		if err != nil {
 			return nil, err
 		}
-
 	}
 
 	params := driverParameters{
@@ -130,6 +157,7 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri
 		email:         jwtConf.Email,
 		privateKey:    jwtConf.PrivateKey,
 		client:        oauth2.NewClient(context.Background(), ts),
+		chunkSize:     chunkSize,
 	}
 
 	return New(params)
@@ -141,12 +169,16 @@ func New(params driverParameters) (storagedriver.StorageDriver, error) {
 	if rootDirectory != "" {
 		rootDirectory += "/"
 	}
+	if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 {
+		return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize)
+	}
 	d := &driver{
 		bucket:        params.bucket,
 		rootDirectory: rootDirectory,
 		email:         params.email,
 		privateKey:    params.privateKey,
 		client:        params.client,
+		chunkSize:     params.chunkSize,
 	}
 
 	return &base.Base{
@@ -263,7 +295,7 @@ func (d *driver) Writer(context ctx.Context, path string, append bool) (storaged
 		client: d.client,
 		bucket: d.bucket,
 		name:   d.pathToKey(path),
-		buffer: make([]byte, maxChunkSize),
+		buffer: make([]byte, d.chunkSize),
 	}
 
 	if append {
diff --git a/docs/storage/driver/gcs/gcs_test.go b/docs/storage/driver/gcs/gcs_test.go
index 4852bf2c..f2808d5f 100644
--- a/docs/storage/driver/gcs/gcs_test.go
+++ b/docs/storage/driver/gcs/gcs_test.go
@@ -75,6 +75,7 @@ func init() {
 			email:      email,
 			privateKey: privateKey,
 			client:     oauth2.NewClient(ctx.Background(), ts),
+			chunkSize:  defaultChunkSize,
 		}
 
 		return New(parameters)
From 5b48c81545034e230b57d280914ccdecf1c4f8de Mon Sep 17 00:00:00 2001
From: Li Yi
Date: Sun, 21 Feb 2016 08:54:32 +0800
Subject: [PATCH 432/501] Support FileWriter interface for OSS storage driver

Change-Id: Ie5533ad85f944800499ca1040fd67bf1378815e0
Signed-off-by: Li Yi

---
 docs/storage/driver/oss/oss.go | 523 +++++++++++++--------------------
 1 file changed, 202 insertions(+), 321 deletions(-)

diff --git a/docs/storage/driver/oss/oss.go b/docs/storage/driver/oss/oss.go
index 1ec04525..7ae70334 100644
--- a/docs/storage/driver/oss/oss.go
+++ b/docs/storage/driver/oss/oss.go
@@ -20,7 +20,6 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/docker/distribution/context"
@@ -75,9 +74,6 @@ type driver struct {
 	ChunkSize     int64
 	Encrypt       bool
 	RootDirectory string
-
-	pool  sync.Pool // pool []byte buffers used for WriteStream
-	zeros []byte    // shared, zero-valued buffer used for WriteStream
 }
 
 type baseEmbed struct {
@@ -99,8 +95,7 @@ type
Driver struct { // - encrypt func FromParameters(parameters map[string]interface{}) (*Driver, error) { // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskeyid"] if !ok { return nil, fmt.Errorf("No accesskeyid parameter provided") @@ -220,11 +215,6 @@ func New(params DriverParameters) (*Driver, error) { ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) } return &Driver{ @@ -256,9 +246,9 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") @@ -279,315 +269,37 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return resp.Body, nil } -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []oss.Part{} - var part oss.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.ossPath(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return nil, err + } + return d.newWriter(key, multi, nil), nil + } + multis, _, err := d.Bucket.ListMulti(key, "") if err != nil { - return 0, err + return nil, parseError(path, err) } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. 
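// The comment above spells out the invariant the old WriteStream fought to
// maintain: never leave a dangling multipart upload. The replacement writer
// later in this patch keeps the same aliyungo multipart calls but drives them
// sequentially. A stripped-down sketch of that lifecycle; the permission and
// options argument types follow the aliyungo API as used in this file, and
// each chunk is assumed to already satisfy the service's minimum part size:

package example

import (
	"bytes"

	"github.com/denverdino/aliyungo/oss"
)

// uploadMultipart pushes chunks as numbered parts and completes the upload,
// aborting on failure so no dangling multipart state is left behind.
func uploadMultipart(bucket *oss.Bucket, key, contentType string, perm oss.ACL, opts oss.Options, chunks [][]byte) error {
	multi, err := bucket.InitMulti(key, contentType, perm, opts)
	if err != nil {
		return err
	}
	var parts []oss.Part
	for i, chunk := range chunks {
		part, err := multi.PutPart(i+1, bytes.NewReader(chunk)) // part numbers start at 1
		if err != nil {
			multi.Abort()
			return err
		}
		parts = append(parts, part)
	}
	if err := multi.Complete(parts); err != nil {
		multi.Abort()
		return err
	}
	return nil
}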
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } + for _, multi := range multis { + if key != multi.Key { + continue } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) + parts, err := multi.ListParts() if err != nil { - return err + return nil, parseError(path, err) } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - + var multiSize int64 + for _, part := range parts { + multiSize += part.Size } - return nil + return d.newWriter(key, multi, parts), nil } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the OSS - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying OSS library should handle it, it doesn't seem to - // be part of the shouldRetry function (see denverdino/aliyungo/oss). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part oss.Part - - part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout) - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. 
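// The serialization promised in the comment above comes from the structure of
// the deleted code: at most one upload goroutine is ever in flight, because
// fromReader drains putErrChan before launching the next part. That handoff
// is interleaved with buffer management here; the following distils just the
// pattern, with doUpload standing in (hypothetically) for the per-part
// PutPartWithTimeout call:

package example

// uploadSerially keeps at most one upload goroutine in flight. Each iteration
// first waits for the previous part's result, so parts complete in order and
// the shared parts slice never needs a lock.
func uploadSerially(chunks [][]byte, doUpload func([]byte) error) error {
	var pending chan error
	for _, chunk := range chunks {
		if pending != nil {
			if err := <-pending; err != nil {
				return err
			}
		}
		ch := make(chan error, 1)
		go func(c []byte) { ch <- doUpload(c) }(chunk)
		pending = ch
	}
	if pending != nil {
		return <-pending
	}
	return nil
}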
- parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.ossPath(path), nil) - if err != nil { - if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != http.StatusNotFound { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - 
return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil + return nil, storagedriver.PathNotFoundError{Path: path} } // Stat retrieves the FileInfo for the given path, including the current size @@ -778,12 +490,181 @@ func (d *driver) getContentType() string { return "application/octet-stream" } -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + multi *oss.Multi + parts []oss.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool } -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) +func (d *driver) newWriter(key string, multi *oss.Multi, parts []oss.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []oss.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) 
+ n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil } From 490a2f5a55cb2135d6a2575969dcbc29a535996a Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 8 Mar 2016 15:57:12 -0800 Subject: [PATCH 433/501] Updates Swift driver to support new storagedriver.FileWriter interface Signed-off-by: Brian Bland --- docs/storage/driver/swift/swift.go | 398 +++++++++++++---------------- 1 file changed, 184 insertions(+), 214 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 86bce794..c4d5a574 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -16,8 +16,8 @@ package swift import ( + "bufio" "bytes" - "crypto/md5" "crypto/rand" "crypto/sha1" "crypto/tls" @@ -49,6 +49,9 @@ const defaultChunkSize = 20 * 1024 * 1024 // minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 +// contentType defines the Content-Type header associated with stored segments +const contentType = "application/octet-stream" + // readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded var readAfterWriteTimeout = 15 * time.Second @@ -282,16 +285,16 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType) if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
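// Like the GCS and OSS writers earlier in this series, the Swift writer below
// rebuilds its state from existing segments when opened with append = true;
// that is the contract linkedBlobStore.Resume depends on. A sketch of the
// resume path from the caller's side, assuming d is any driver updated to the
// new interface (illustrative, not part of the patch):

package example

import (
	"io"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// resumeUpload reopens an in-progress upload and appends the remaining data.
func resumeUpload(ctx context.Context, d storagedriver.StorageDriver, path string, rest io.Reader) (int64, error) {
	writer, err := d.Writer(ctx, path, true) // true: append to the existing upload
	if err != nil {
		return 0, err // e.g. PathNotFoundError if nothing was ever started at path
	}
	defer writer.Close()
	if _, err := io.Copy(writer, rest); err != nil {
		return writer.Size(), err
	}
	return writer.Size(), writer.Commit()
}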
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(swift.Headers) headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" @@ -305,224 +308,46 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return file, err } -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { var ( - segments []swift.Object - multi io.Reader - paddingReader io.Reader - currentLength int64 - cursor int64 - segmentPath string + segments []swift.Object + segmentsPath string + err error ) - partNumber := 1 - chunkSize := int64(d.ChunkSize) - zeroBuf := make([]byte, d.ChunkSize) - hash := md5.New() - - getSegment := func() string { - return fmt.Sprintf("%s/%016d", segmentPath, partNumber) - } - - max := func(a int64, b int64) int64 { - if a > b { - return a - } - return b - } - - createManifest := true - info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) - if err == nil { - manifest, ok := headers["X-Object-Manifest"] - if !ok { - if segmentPath, err = d.swiftSegmentPath(path); err != nil { - return 0, err - } - if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { - return 0, err - } - segments = append(segments, info) - } else { - _, segmentPath = parseManifest(manifest) - if segments, err = d.getAllSegments(segmentPath); err != nil { - return 0, err - } - createManifest = false - } - currentLength = info.Bytes - } else if err == swift.ObjectNotFound { - if segmentPath, err = d.swiftSegmentPath(path); err != nil { - return 0, err + if !append { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err } } else { - return 0, err - } - - // First, we skip the existing segments that are not modified by this call - for i := range segments { - if offset < cursor+segments[i].Bytes { - break + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } else if err != nil { + return nil, err } - cursor += segments[i].Bytes - hash.Write([]byte(segments[i].Hash)) - partNumber++ - } - - // We reached the end of the file but we haven't reached 'offset' yet - // Therefore we add blocks of zeros - if offset >= currentLength { - for offset-currentLength >= chunkSize { - // Insert a block a zero - headers, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) + manifest, ok := headers["X-Object-Manifest"] + if !ok { + segmentsPath, err = d.swiftSegmentPath(path) if err != nil { - if err == swift.ObjectNotFound { - return 0, 
storagedriver.PathNotFoundError{Path: getSegment()} - } - return 0, err + return nil, err } - currentLength += chunkSize - partNumber++ - hash.Write([]byte(headers["Etag"])) - } - - cursor = currentLength - paddingReader = bytes.NewReader(zeroBuf) - } else if offset-cursor > 0 { - // Offset is inside the current segment : we need to read the - // data from the beginning of the segment to offset - file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) - if err != nil { - if err == swift.ObjectNotFound { - return 0, storagedriver.PathNotFoundError{Path: getSegment()} + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, segmentPath(segmentsPath, len(segments))); err != nil { + return nil, err } - return 0, err - } - defer file.Close() - paddingReader = file - } - - readers := []io.Reader{} - if paddingReader != nil { - readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) - } - readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) - multi = io.MultiReader(readers...) - - writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { - currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} + segments = []swift.Object{info} + } else { + _, segmentsPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentsPath); err != nil { + return nil, err } - return false, bytesRead, err - } - - segmentHash := md5.New() - writer := io.MultiWriter(currentSegment, segmentHash) - - n, err := io.Copy(writer, multi) - if err != nil { - return false, bytesRead, err - } - - if n > 0 { - defer func() { - closeError := currentSegment.Close() - if err != nil { - err = closeError - } - hexHash := hex.EncodeToString(segmentHash.Sum(nil)) - hash.Write([]byte(hexHash)) - }() - bytesRead += n - max(0, offset-cursor) - } - - if n < chunkSize { - // We wrote all the data - if cursor+n < currentLength { - // Copy the end of the chunk - headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) - file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - _, copyErr := io.Copy(writer, file) - - if err := file.Close(); err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - if copyErr != nil { - return false, bytesRead, copyErr - } - } - - return true, bytesRead, nil - } - - multi = io.LimitReader(reader, chunkSize) - cursor += chunkSize - partNumber++ - - return false, bytesRead, nil - } - - finished := false - read := int64(0) - bytesRead := int64(0) - for finished == false { - finished, read, err = writeSegment(getSegment()) - bytesRead += read - if err != nil { - return bytesRead, err } } - for ; partNumber < len(segments); partNumber++ { - hash.Write([]byte(segments[partNumber].Hash)) - } - - if createManifest { - if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { - return 0, err - } - } - - expectedHash := hex.EncodeToString(hash.Sum(nil)) - waitingTime := readAfterWriteWait - endTime := time.Now().Add(readAfterWriteTimeout) - for { - var 
infos swift.Object - if infos, _, err = d.Conn.Object(d.Container, d.swiftPath(path)); err == nil { - if strings.Trim(infos.Hash, "\"") == expectedHash { - return bytesRead, nil - } - err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) - } - if time.Now().Add(waitingTime).After(endTime) { - break - } - time.Sleep(waitingTime) - waitingTime *= 2 - } - - return bytesRead, err + return d.newWriter(path, segmentsPath, segments), nil } // Stat retrieves the FileInfo for the given path, including the current size @@ -763,10 +588,6 @@ func (d *driver) swiftSegmentPath(path string) (string, error) { return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil } -func (d *driver) getContentType() string { - return "application/octet-stream" -} - func (d *driver) getAllSegments(path string) ([]swift.Object, error) { segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) if err == swift.ContainerNotFound { @@ -778,7 +599,7 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) { func (d *driver) createManifest(path string, segments string) error { headers := make(swift.Headers) headers["X-Object-Manifest"] = segments - manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers) if err != nil { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} @@ -810,3 +631,152 @@ func generateSecret() (string, error) { } return hex.EncodeToString(secretBytes[:]), nil } + +func segmentPath(segmentsPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) +} + +type writer struct { + driver *driver + path string + segmentsPath string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter { + var size int64 + for _, segment := range segments { + size += segment.Bytes + } + return &writer{ + driver: d, + path: path, + segmentsPath: segmentsPath, + size: size, + bw: bufio.NewWriterSize(&segmentWriter{ + conn: d.Conn, + container: d.Container, + segmentsPath: segmentsPath, + segmentNumber: len(segments) + 1, + maxChunkSize: d.ChunkSize, + }, d.ChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if !w.committed && !w.cancelled { + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + } + w.closed = true + + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.Delete(context.Background(), w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already 
committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + + w.committed = true + + var err error + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + for { + var info swift.Object + if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { + if info.Bytes == w.size { + break + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return err +} + +type segmentWriter struct { + conn swift.Connection + container string + segmentsPath string + segmentNumber int + maxChunkSize int +} + +func (sw *segmentWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += sw.maxChunkSize { + chunkSize := sw.maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + _, err := sw.conn.ObjectPut(sw.container, segmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) + if err != nil { + return n, err + } + + sw.segmentNumber++ + n += chunkSize + } + + return n, nil +} From 259ef42c8c2b04e3a6afbd8c7ebb175d084e83f4 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 9 Mar 2016 12:44:55 -0800 Subject: [PATCH 434/501] Add client ID to token fetch to GET endpoint Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/auth/session.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index 3f6e9164..058a87b9 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -19,6 +19,8 @@ import ( // basic auth due to lack of credentials. var ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") +const defaultClientID = "registry-client" + // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a single scheme. 
 type AuthenticationHandler interface {
@@ -272,7 +274,7 @@ func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic
 	clientID := th.clientID
 	if clientID == "" {
 		// Use default client, this is a required field
-		clientID = "registry-client"
+		clientID = defaultClientID
 	}
 	form.Set("client_id", clientID)
 
@@ -355,6 +357,11 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string,
 
 	if th.offlineAccess {
 		reqParams.Add("offline_token", "true")
+		clientID := th.clientID
+		if clientID == "" {
+			clientID = defaultClientID
+		}
+		reqParams.Add("client_id", clientID)
 	}
 
 	if th.creds != nil {

From 19cfa36ec8b0a56b725730183bf7350270d3d7c1 Mon Sep 17 00:00:00 2001
From: Keerthan Mala
Date: Sat, 5 Mar 2016 11:46:44 -0700
Subject: [PATCH 435/501] Added support to specify custom endpoint

Signed-off-by: Keerthan Reddy Mala

---
 docs/storage/driver/s3-aws/s3.go      | 64 ++++++++++++++++++---------
 docs/storage/driver/s3-aws/s3_test.go |  2 +
 2 files changed, 45 insertions(+), 21 deletions(-)

diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go
index 0e113680..5496311d 100644
--- a/docs/storage/driver/s3-aws/s3.go
+++ b/docs/storage/driver/s3-aws/s3.go
@@ -56,16 +56,17 @@ var validRegions = map[string]struct{}{}
 
 //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
 type DriverParameters struct {
-	AccessKey     string
-	SecretKey     string
-	Bucket        string
-	Region        string
-	Encrypt       bool
-	Secure        bool
-	ChunkSize     int64
-	RootDirectory string
-	StorageClass  string
-	UserAgent     string
+	AccessKey      string
+	SecretKey      string
+	Bucket         string
+	Region         string
+	RegionEndpoint string
+	Encrypt        bool
+	Secure         bool
+	ChunkSize      int64
+	RootDirectory  string
+	StorageClass   string
+	UserAgent      string
 }
 
 func init() {
@@ -153,6 +154,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
 		return nil, fmt.Errorf("No bucket parameter provided")
 	}
 
+	regionEndpoint := parameters["regionendpoint"]
+	if regionEndpoint == nil {
+		regionEndpoint = ""
+	}
+
 	encryptBool := false
 	encrypt := parameters["encrypt"]
 	switch encrypt := encrypt.(type) {
@@ -240,6 +246,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
 		fmt.Sprint(secretKey),
 		fmt.Sprint(bucket),
 		region,
+		fmt.Sprint(regionEndpoint),
 		encryptBool,
 		secureBool,
 		chunkSize,
@@ -255,22 +262,37 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
 // bucketName
 func New(params DriverParameters) (*Driver, error) {
 	awsConfig := aws.NewConfig()
-	creds := credentials.NewChainCredentials([]credentials.Provider{
-		&credentials.StaticProvider{
-			Value: credentials.Value{
-				AccessKeyID:     params.AccessKey,
-				SecretAccessKey: params.SecretKey,
+	var creds *credentials.Credentials
+	if params.RegionEndpoint == "" {
+		creds = credentials.NewChainCredentials([]credentials.Provider{
+			&credentials.StaticProvider{
+				Value: credentials.Value{
+					AccessKeyID:     params.AccessKey,
+					SecretAccessKey: params.SecretKey,
+				},
-			},
-		&credentials.EnvProvider{},
-		&credentials.SharedCredentialsProvider{},
-		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
-	})
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{},
+			&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
+		})
+
+	} else {
+		creds = credentials.NewChainCredentials([]credentials.Provider{
+			&credentials.StaticProvider{
+				Value: credentials.Value{
+					AccessKeyID:     params.AccessKey,
+					SecretAccessKey: params.SecretKey,
+ }, + }, + &credentials.EnvProvider{}, + }) + awsConfig.WithS3ForcePathStyle(true) + awsConfig.WithEndpoint(params.RegionEndpoint) + } awsConfig.WithCredentials(creds) awsConfig.WithRegion(params.Region) awsConfig.WithDisableSSL(!params.Secure) - // awsConfig.WithMaxRetries(10) if params.UserAgent != "" { awsConfig.WithHTTPClient(&http.Client{ diff --git a/docs/storage/driver/s3-aws/s3_test.go b/docs/storage/driver/s3-aws/s3_test.go index 1137b3e2..f12297bf 100644 --- a/docs/storage/driver/s3-aws/s3_test.go +++ b/docs/storage/driver/s3-aws/s3_test.go @@ -30,6 +30,7 @@ func init() { secure := os.Getenv("S3_SECURE") region := os.Getenv("AWS_REGION") root, err := ioutil.TempDir("", "driver-") + regionEndpoint := os.Getenv("REGION_ENDPOINT") if err != nil { panic(err) } @@ -57,6 +58,7 @@ func init() { secretKey, bucket, region, + regionEndpoint, encryptBool, secureBool, minChunkSize, From eea043dc7bc8aa404e8821041b412468ae936620 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 10 Mar 2016 16:46:43 -0800 Subject: [PATCH 436/501] Removes ceph rados driver in favor of Swift API gateway support Signed-off-by: Brian Bland --- docs/storage/driver/rados/doc.go | 3 - docs/storage/driver/rados/rados.go | 632 ------------------------ docs/storage/driver/rados/rados_test.go | 40 -- 3 files changed, 675 deletions(-) delete mode 100644 docs/storage/driver/rados/doc.go delete mode 100644 docs/storage/driver/rados/rados.go delete mode 100644 docs/storage/driver/rados/rados_test.go diff --git a/docs/storage/driver/rados/doc.go b/docs/storage/driver/rados/doc.go deleted file mode 100644 index 655c68a3..00000000 --- a/docs/storage/driver/rados/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rados implements the rados storage driver backend. Support can be -// enabled by including the "include_rados" build tag. 
-package rados diff --git a/docs/storage/driver/rados/rados.go b/docs/storage/driver/rados/rados.go deleted file mode 100644 index c2be528e..00000000 --- a/docs/storage/driver/rados/rados.go +++ /dev/null @@ -1,632 +0,0 @@ -// +build include_rados - -package rados - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "path" - "strconv" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/uuid" - "github.com/noahdesu/go-ceph/rados" -) - -const driverName = "rados" - -// Prefix all the stored blob -const objectBlobPrefix = "blob:" - -// Stripes objects size to 4M -const defaultChunkSize = 4 << 20 -const defaultXattrTotalSizeName = "total-size" - -// Max number of keys fetched from omap at each read operation -const defaultKeysFetched = 1 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - poolname string - username string - chunksize uint64 -} - -func init() { - factory.Register(driverName, &radosDriverFactory{}) -} - -// radosDriverFactory implements the factory.StorageDriverFactory interface -type radosDriverFactory struct{} - -func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn *rados.Conn - Ioctx *rados.IOContext - chunksize uint64 -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - poolname: the ceph pool name -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - - pool, ok := parameters["poolname"] - if !ok { - return nil, fmt.Errorf("No poolname parameter provided") - } - - username, ok := parameters["username"] - if !ok { - username = "" - } - - chunksize := uint64(defaultChunkSize) - chunksizeParam, ok := parameters["chunksize"] - if ok { - chunksize, ok = chunksizeParam.(uint64) - if !ok { - return nil, fmt.Errorf("The chunksize parameter should be a number") - } - } - - params := DriverParameters{ - fmt.Sprint(pool), - fmt.Sprint(username), - chunksize, - } - - return New(params) -} - -// New constructs a new Driver -func New(params DriverParameters) (*Driver, error) { - var conn *rados.Conn - var err error - - if params.username != "" { - log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) - conn, err = rados.NewConnWithUser(params.username) - } else { - log.Infof("Opening connection to pool %s", params.poolname) - conn, err = rados.NewConn() - } - - if err != nil { - return nil, err - } - - err = conn.ReadDefaultConfigFile() - if err != nil { - return nil, err - } - - err = conn.Connect() - if err != nil { - return nil, err - } - - log.Infof("Connected") - - ioctx, err := conn.OpenIOContext(params.poolname) - - log.Infof("Connected to pool %s", params.poolname) - - if err != nil { - return nil, err - } - - d := &driver{ - Ioctx: ioctx, - Conn: conn, - chunksize: params.chunksize, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
-type readStreamReader struct { - driver *driver - oid string - size uint64 - offset uint64 -} - -func (r *readStreamReader) Read(b []byte) (n int, err error) { - // Determine the part available to read - bufferOffset := uint64(0) - bufferSize := uint64(len(b)) - - // End of the object, read less than the buffer size - if bufferSize > r.size-r.offset { - bufferSize = r.size - r.offset - } - - // Fill `b` - for bufferOffset < bufferSize { - // Get the offset in the object chunk - chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset) - - // Determine the best size to read - bufferEndOffset := bufferSize - if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset { - bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset) - } - - // Read the chunk - n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset) - - if err != nil { - return int(bufferOffset), err - } - - bufferOffset += uint64(n) - r.offset += uint64(n) - } - - // EOF if the offset is at the end of the object - if r.offset == r.size { - return int(bufferOffset), io.EOF - } - - return int(bufferOffset), nil -} - -func (r *readStreamReader) Close() error { - return nil -} - -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // get object stat - stat, err := d.Stat(ctx, path) - - if err != nil { - return nil, err - } - - if offset > stat.Size() { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return &readStreamReader{ - driver: d, - oid: oid, - size: uint64(stat.Size()), - offset: uint64(offset), - }, nil -} - -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - buf := make([]byte, d.chunksize) - totalRead = 0 - - oid, err := d.getOid(path) - if err != nil { - switch err.(type) { - // Trying to write new object, generate new blob identifier for it - case storagedriver.PathNotFoundError: - oid = d.generateOid() - err = d.putOid(path, oid) - if err != nil { - return 0, err - } - default: - return 0, err - } - } else { - // Check total object size only for existing ones - totalSize, err := d.getXattrTotalSize(ctx, oid) - if err != nil { - return 0, err - } - - // If offset if after the current object size, fill the gap with zeros - for totalSize < uint64(offset) { - sizeToWrite := d.chunksize - if totalSize-uint64(offset) < sizeToWrite { - sizeToWrite = totalSize - uint64(offset) - } - - chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize)) - err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset)) - if err != nil { - return totalRead, err - } - - totalSize += sizeToWrite - } - } - - // Writer - for { - // Align to chunk size - sizeRead := uint64(0) - sizeToRead := uint64(offset+totalRead) % d.chunksize - if sizeToRead == 0 { - sizeToRead = d.chunksize - } - - // Read from `reader` - for sizeRead < sizeToRead { - nn, err := reader.Read(buf[sizeRead:sizeToRead]) - sizeRead += uint64(nn) - - if err != nil { - if err != io.EOF { - return totalRead, err - } - - break - } - } - - // End of file and nothing was read - if sizeRead == 0 { - break - } - - // Write chunk object - chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead)) - err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset)) - - if err != nil { - return totalRead, err - } 
- - // Update total object size as xattr in the first chunk of the object - err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) - if err != nil { - return totalRead, err - } - - totalRead += int64(sizeRead) - - // End of file - if sizeRead < sizeToRead { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // the path is a virtual directory? - if oid == "" { - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: 0, - IsDir: true, - }, - }, nil - } - - // stat first chunk - stat, err := d.Ioctx.Stat(oid + "-0") - - if err != nil { - return nil, err - } - - // get total size of chunked object - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(totalSize), - ModTime: stat.ModTime, - }, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { - files, err := d.listDirectoryOid(dirPath) - - if err != nil { - return nil, storagedriver.PathNotFoundError{Path: dirPath} - } - - keys := make([]string, 0, len(files)) - for k := range files { - if k != dirPath { - keys = append(keys, path.Join(dirPath, k)) - } - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - // Get oid - oid, err := d.getOid(sourcePath) - - if err != nil { - return err - } - - // Move reference - err = d.putOid(destPath, oid) - - if err != nil { - return err - } - - // Delete old reference - err = d.deleteOid(sourcePath) - - if err != nil { - return err - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, objectPath string) error { - // Get oid - oid, err := d.getOid(objectPath) - - if err != nil { - return err - } - - // Deleting virtual directory - if oid == "" { - objects, err := d.listDirectoryOid(objectPath) - if err != nil { - return err - } - - for object := range objects { - err = d.Delete(ctx, path.Join(objectPath, object)) - if err != nil { - return err - } - } - } else { - // Delete object chunks - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return err - } - - for offset := uint64(0); offset < totalSize; offset += d.chunksize { - chunkName, _ := d.getChunkNameFromOffset(oid, offset) - - err = d.Ioctx.Delete(chunkName) - if err != nil { - return err - } - } - - // Delete reference - err = d.deleteOid(objectPath) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod{} -} - -// Generate a blob identifier -func (d *driver) generateOid() string { - return objectBlobPrefix + uuid.Generate().String() -} - -// Reference a object and its hierarchy -func (d *driver) putOid(objectPath string, oid string) error { - directory := path.Dir(objectPath) - base := path.Base(objectPath) - createParentReference := true - - // After creating this reference, skip the parents referencing since the - // hierarchy already exists - if oid == "" { - firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) - if (err == nil) && (len(firstReference) > 0) { - createParentReference = false - } - } - - oids := map[string][]byte{ - base: []byte(oid), - } - - // Reference object - err := d.Ioctx.SetOmap(directory, oids) - if err != nil { - return err - } - - // Esure parent virtual directories - if createParentReference { - return d.putOid(directory, "") - } - - return nil -} - -// Get the object identifier from an object name -func (d *driver) getOid(objectPath string) (string, error) { - directory := path.Dir(objectPath) - base := path.Base(objectPath) - - files, err := d.Ioctx.GetOmapValues(directory, "", base, 1) - - if (err != nil) || (files[base] == nil) { - return "", storagedriver.PathNotFoundError{Path: objectPath} - } - - return string(files[base]), nil -} - -// List the objects of a virtual directory -func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) { - return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched) -} - -// Remove a file from the files hierarchy -func (d *driver) deleteOid(objectPath string) error { - // Remove object reference - directory := path.Dir(objectPath) - base := path.Base(objectPath) - err := d.Ioctx.RmOmapKeys(directory, []string{base}) - - if err != nil { - return err - } - - // Remove virtual directory if empty (no more references) - firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) - - if err != nil { - return err - } - - if len(firstReference) == 0 { - // Delete omap - err := d.Ioctx.Delete(directory) - - if err != nil { - return err - } - - // Remove reference on parent omaps - if directory != "" { - return d.deleteOid(directory) - } - } - - return nil -} - -// Takes an offset in an chunked object and return the chunk name and a new -// offset in this chunk object -func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) { - chunkID := offset / d.chunksize - chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10) - chunkedOffset := offset % d.chunksize - return chunkedOid, chunkedOffset -} - -// Set the total size of a chunked object `oid` -func (d *driver) setXattrTotalSize(oid string, size uint64) error { - // Convert uint64 `size` to []byte - xattr := make([]byte, binary.MaxVarintLen64) - binary.LittleEndian.PutUint64(xattr, size) - - // Save the total size as a xattr in the first chunk - return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) -} - -// Get the total size of the chunked object `oid` stored as xattr -func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) { - // Fetch xattr as []byte - xattr := make([]byte, binary.MaxVarintLen64) - xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) - - if err != nil { - return 0, err - } - - if xattrLength != len(xattr) { - context.GetLogger(ctx).Errorf("object %s 
xattr length mismatch: %d != %d", oid, xattrLength, len(xattr)) - return 0, storagedriver.PathNotFoundError{Path: oid} - } - - // Convert []byte as uint64 - totalSize := binary.LittleEndian.Uint64(xattr) - - return totalSize, nil -} diff --git a/docs/storage/driver/rados/rados_test.go b/docs/storage/driver/rados/rados_test.go deleted file mode 100644 index ce367fb5..00000000 --- a/docs/storage/driver/rados/rados_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build include_rados - -package rados - -import ( - "os" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -func init() { - poolname := os.Getenv("RADOS_POOL") - username := os.Getenv("RADOS_USER") - - driverConstructor := func() (storagedriver.StorageDriver, error) { - parameters := DriverParameters{ - poolname, - username, - defaultChunkSize, - } - - return New(parameters) - } - - skipCheck := func() string { - if poolname == "" { - return "RADOS_POOL must be set to run Rado tests" - } - return "" - } - - testsuites.RegisterSuite(driverConstructor, skipCheck) -} From 789c90ac4216f03289ac4f53b11a53ed849dbe33 Mon Sep 17 00:00:00 2001 From: Matt Duch Date: Wed, 9 Mar 2016 18:52:59 -0600 Subject: [PATCH 437/501] registry/storage/driver/s3-aws kms support Signed-off-by: Matt Duch --- docs/storage/driver/s3-aws/s3.go | 24 +++++++++++++++++++++++- docs/storage/driver/s3-aws/s3_test.go | 2 ++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index db61b4e7..8683f80e 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -60,6 +60,7 @@ type DriverParameters struct { Region string RegionEndpoint string Encrypt bool + KeyID string Secure bool ChunkSize int64 RootDirectory string @@ -100,6 +101,7 @@ type driver struct { Bucket string ChunkSize int64 Encrypt bool + KeyID string RootDirectory string StorageClass string } @@ -188,6 +190,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("The secure parameter should be a boolean") } + keyID := parameters["keyid"] + if keyID == nil { + keyID = "" + } + chunkSize := int64(defaultChunkSize) chunkSizeParam := parameters["chunksize"] switch v := chunkSizeParam.(type) { @@ -243,6 +250,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { region, fmt.Sprint(regionEndpoint), encryptBool, + fmt.Sprint(keyID), secureBool, chunkSize, fmt.Sprint(rootDirectory), @@ -317,6 +325,7 @@ func New(params DriverParameters) (*Driver, error) { Bucket: params.Bucket, ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, + KeyID: params.KeyID, RootDirectory: params.RootDirectory, StorageClass: params.StorageClass, } @@ -353,6 +362,7 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e ContentType: d.getContentType(), ACL: d.getACL(), ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), StorageClass: d.getStorageClass(), Body: bytes.NewReader(contents), }) @@ -390,6 +400,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged ContentType: d.getContentType(), ACL: d.getACL(), ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), StorageClass: d.getStorageClass(), }) if err != nil { @@ -534,6 +545,7 @@ 
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
 		ContentType:          d.getContentType(),
 		ACL:                  d.getACL(),
 		ServerSideEncryption: d.getEncryptionMode(),
+		SSEKMSKeyId:          d.getSSEKMSKeyID(),
 		StorageClass:         d.getStorageClass(),
 		CopySource:           aws.String(d.Bucket + "/" + d.s3Path(sourcePath)),
 	})
@@ -645,9 +657,19 @@ func parseError(path string, err error) error {
 }
 
 func (d *driver) getEncryptionMode() *string {
-	if d.Encrypt {
+	if !d.Encrypt {
+		return nil
+	}
+	if d.KeyID == "" {
 		return aws.String("AES256")
 	}
+	return aws.String("aws:kms")
+}
+
+func (d *driver) getSSEKMSKeyID() *string {
+	if d.KeyID != "" {
+		return aws.String(d.KeyID)
+	}
 	return nil
 }
 
diff --git a/docs/storage/driver/s3-aws/s3_test.go b/docs/storage/driver/s3-aws/s3_test.go
index f12297bf..bb64ccf4 100644
--- a/docs/storage/driver/s3-aws/s3_test.go
+++ b/docs/storage/driver/s3-aws/s3_test.go
@@ -27,6 +27,7 @@ func init() {
 	secretKey := os.Getenv("AWS_SECRET_KEY")
 	bucket := os.Getenv("S3_BUCKET")
 	encrypt := os.Getenv("S3_ENCRYPT")
+	keyID := os.Getenv("S3_KEY_ID")
 	secure := os.Getenv("S3_SECURE")
 	region := os.Getenv("AWS_REGION")
 	root, err := ioutil.TempDir("", "driver-")
@@ -60,6 +61,7 @@ func init() {
 		region,
 		regionEndpoint,
 		encryptBool,
+		keyID,
 		secureBool,
 		minChunkSize,
 		rootDirectory,

From c94c2a47a3167adcfe8cb17b96ec632e33334bbd Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Mon, 14 Mar 2016 10:06:30 -0700
Subject: [PATCH 438/501] Don't return empty errcode.Errors slices

If this slice ends up empty after parsing the HTTP response body, it
means the body is not well-formed. We've probably encountered an error
message produced by something that uses a different JSON schema, or an
error that just happens to validate as JSON.

An empty errcode.Errors slice is not a very useful thing to return,
since its Error() output is just ``. Detect this case, and instead
return an UnexpectedHTTPResponseError.

Signed-off-by: Aaron Lehmann

---
 docs/client/errors.go      | 19 +++++++++++++++++--
 docs/client/errors_test.go | 19 +++++++++++++++++--
 2 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/docs/client/errors.go b/docs/client/errors.go
index a528a865..043782bf 100644
--- a/docs/client/errors.go
+++ b/docs/client/errors.go
@@ -2,6 +2,7 @@ package client
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -10,6 +11,10 @@ import (
 	"github.com/docker/distribution/registry/api/errcode"
 )
 
+// ErrNoErrorsInBody is returned when a HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
 // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
 // returned when making a registry api call.
type UnexpectedHTTPStatusError struct { @@ -17,7 +22,7 @@ type UnexpectedHTTPStatusError struct { } func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) } // UnexpectedHTTPResponseError is returned when an expected HTTP status code @@ -28,7 +33,7 @@ type UnexpectedHTTPResponseError struct { } func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) + return fmt.Sprintf("error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(statusCode int, r io.Reader) error { @@ -57,6 +62,16 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { Response: body, } } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + Response: body, + } + } + return errors } diff --git a/docs/client/errors_test.go b/docs/client/errors_test.go index 80241a5a..1d60cd2d 100644 --- a/docs/client/errors_test.go +++ b/docs/client/errors_test.go @@ -59,6 +59,21 @@ func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { } } +func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { + json := `{"randomkey": "randomvalue"}` + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := `error parsing HTTP response: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { json := "{invalid json}" response := &http.Response{ @@ -68,7 +83,7 @@ func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { } err := HandleErrorResponse(response) - expectedMsg := "Error parsing HTTP response: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" + expectedMsg := "error parsing HTTP response: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } @@ -82,7 +97,7 @@ func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { } err := HandleErrorResponse(response) - expectedMsg := "Received unexpected HTTP status: 501 Not Implemented" + expectedMsg := "received unexpected HTTP status: 501 Not Implemented" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } From 98140ca0ab7477a7ec19ec04f1f6053a320ccc87 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 14 Mar 2016 11:18:27 -0700 Subject: [PATCH 439/501] Update missing blob error checking with latest Azure API Signed-off-by: Richard Scothern --- docs/storage/driver/azure/azure.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/azure/azure.go b/docs/storage/driver/azure/azure.go index 70771375..b06b0876 100644 --- a/docs/storage/driver/azure/azure.go +++ b/docs/storage/driver/azure/azure.go @@ -382,8 +382,8 @@ func (d *driver) listBlobs(container, virtPath 
string) ([]string, error) { } func is404(err error) bool { - statusCodeErr, ok := err.(azure.UnexpectedStatusCodeError) - return ok && statusCodeErr.Got() == http.StatusNotFound + statusCodeErr, ok := err.(azure.AzureStorageServiceError) + return ok && statusCodeErr.StatusCode == http.StatusNotFound } type writer struct { From 9638c7644e5fae4083556d6dd213241d02685162 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 15 Mar 2016 09:03:56 -0700 Subject: [PATCH 440/501] Include status code in UnexpectedHTTPResponseError Signed-off-by: Aaron Lehmann --- docs/client/errors.go | 17 ++++++++++------- docs/client/errors_test.go | 4 ++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/docs/client/errors.go b/docs/client/errors.go index 043782bf..00fafe11 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -28,12 +28,13 @@ func (e *UnexpectedHTTPStatusError) Error() string { // UnexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. type UnexpectedHTTPResponseError struct { - ParseErr error - Response []byte + ParseErr error + StatusCode int + Response []byte } func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(statusCode int, r io.Reader) error { @@ -58,8 +59,9 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ - ParseErr: err, - Response: body, + ParseErr: err, + StatusCode: statusCode, + Response: body, } } @@ -67,8 +69,9 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { // If there was no error specified in the body, return // UnexpectedHTTPResponseError. 
return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - Response: body, + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, } } diff --git a/docs/client/errors_test.go b/docs/client/errors_test.go index 1d60cd2d..ca9dddd1 100644 --- a/docs/client/errors_test.go +++ b/docs/client/errors_test.go @@ -68,7 +68,7 @@ func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { } err := HandleErrorResponse(response) - expectedMsg := `error parsing HTTP response: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` + expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } @@ -83,7 +83,7 @@ func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { } err := HandleErrorResponse(response) - expectedMsg := "error parsing HTTP response: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" + expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } From 20bba4025a5ffae435e6450ef70e050897211bf4 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Wed, 16 Mar 2016 19:46:40 +0100 Subject: [PATCH 441/501] registry: client: repository: close response body Signed-off-by: Antonio Murdaca --- docs/client/repository.go | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/client/repository.go b/docs/client/repository.go index 830749f1..936a3f1b 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -308,6 +308,7 @@ check: if err != nil { return distribution.Descriptor{}, err } + defer resp.Body.Close() switch { case resp.StatusCode >= 200 && resp.StatusCode < 400: From e6b317f94fc232caec13232fe0bfc309fa358fbc Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 18 Mar 2016 09:12:27 +0100 Subject: [PATCH 442/501] registry: client: auth: type errors Signed-off-by: Antonio Murdaca --- docs/client/auth/session.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/client/auth/session.go b/docs/client/auth/session.go index c80108ac..f3497b17 100644 --- a/docs/client/auth/session.go +++ b/docs/client/auth/session.go @@ -15,9 +15,15 @@ import ( "github.com/docker/distribution/registry/client/transport" ) -// ErrNoBasicAuthCredentials is returned if a request can't be authorized with -// basic auth due to lack of credentials. -var ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. 
+	ErrNoToken = errors.New("authorization server did not include a token in the response")
+)
 
 const defaultClientID = "registry-client"
 
@@ -402,7 +408,7 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string,
 	}
 
 	if tr.Token == "" {
-		return "", time.Time{}, errors.New("authorization server did not include a token in the response")
+		return "", time.Time{}, ErrNoToken
 	}
 
 	if tr.ExpiresIn < minimumTokenLifetimeSeconds {

From 5f38f0b1feda4d2da0d4ff20eedaa1ff9604b3d8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=A7=9C=E7=BB=A7=E5=BF=A0?=
Date: Wed, 16 Mar 2016 14:12:56 +0800
Subject: [PATCH 443/501] fix manifest revision search, closes #1535
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 姜继忠

---
 docs/storage/linkedblobstore.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go
index e06f9540..68a347b4 100644
--- a/docs/storage/linkedblobstore.go
+++ b/docs/storage/linkedblobstore.go
@@ -384,8 +384,8 @@ var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
 
 func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
 	var (
-		resolveErr error
-		target     digest.Digest
+		found  bool
+		target digest.Digest
 	)
 
 	// try the many link path functions until we get success or an error that
@@ -395,19 +395,20 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis
 		target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
 
 		if err == nil {
+			found = true
 			break // success!
 		}
 
 		switch err := err.(type) {
 		case driver.PathNotFoundError:
-			resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error
+			// do nothing, just move to the next linkPathFn
 		default:
 			return distribution.Descriptor{}, err
 		}
 	}
 
-	if resolveErr != nil {
-		return distribution.Descriptor{}, resolveErr
+	if !found {
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
 	}
 
 	if target != dgst {

From f93d166068e23025f9c49b873b8d0d8e40828568 Mon Sep 17 00:00:00 2001
From: Richard Scothern
Date: Fri, 18 Mar 2016 15:30:47 -0700
Subject: [PATCH 444/501] Propagate tag as a functional argument into the
 notification system to attach tags to manifest push and pull event
 notifications.

Signed-off-by: Richard Scothern

---
 docs/client/repository.go      | 25 +++++--------------------
 docs/client/repository_test.go |  8 ++++----
 docs/handlers/images.go        | 12 ++++++++++--
 3 files changed, 19 insertions(+), 26 deletions(-)

diff --git a/docs/client/repository.go b/docs/client/repository.go
index 936a3f1b..643e23a0 100644
--- a/docs/client/repository.go
+++ b/docs/client/repository.go
@@ -402,9 +402,9 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 	)
 
 	for _, option := range options {
-		if opt, ok := option.(withTagOption); ok {
-			digestOrTag = opt.tag
-			ref, err = reference.WithTag(ms.name, opt.tag)
+		if opt, ok := option.(distribution.WithTagOption); ok {
+			digestOrTag = opt.Tag
+			ref, err = reference.WithTag(ms.name, opt.Tag)
 			if err != nil {
 				return nil, err
 			}
@@ -465,21 +465,6 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 	return nil, HandleErrorResponse(resp)
 }
 
-// WithTag allows a tag to be passed into Put which enables the client
-// to build a correct URL.
-func WithTag(tag string) distribution.ManifestServiceOption { - return withTagOption{tag} -} - -type withTagOption struct{ tag string } - -func (o withTagOption) Apply(m distribution.ManifestService) error { - if _, ok := m.(*manifests); ok { - return nil - } - return fmt.Errorf("withTagOption is a client-only option") -} - // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the // tag name in order to build the correct upload URL. func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { @@ -487,9 +472,9 @@ func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options . var tagged bool for _, option := range options { - if opt, ok := option.(withTagOption); ok { + if opt, ok := option.(distribution.WithTagOption); ok { var err error - ref, err = reference.WithTag(ref, opt.tag) + ref, err = reference.WithTag(ref, opt.Tag) if err != nil { return "", err } diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index df26b631..2faeb276 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -710,7 +710,7 @@ func TestV1ManifestFetch(t *testing.T) { t.Fatal(err) } - manifest, err = ms.Get(ctx, dgst, WithTag("latest")) + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest")) if err != nil { t.Fatal(err) } @@ -723,7 +723,7 @@ func TestV1ManifestFetch(t *testing.T) { t.Fatal(err) } - manifest, err = ms.Get(ctx, dgst, WithTag("badcontenttype")) + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype")) if err != nil { t.Fatal(err) } @@ -761,7 +761,7 @@ func TestManifestFetchWithEtag(t *testing.T) { if !ok { panic("wrong type for client manifest service") } - _, err = clientManifestService.Get(ctx, d1, WithTag("latest"), AddEtagToTag("latest", d1.String())) + _, err = clientManifestService.Get(ctx, d1, distribution.WithTag("latest"), AddEtagToTag("latest", d1.String())) if err != distribution.ErrManifestNotModified { t.Fatal(err) } @@ -861,7 +861,7 @@ func TestManifestPut(t *testing.T) { t.Fatal(err) } - if _, err := ms.Put(ctx, m1, WithTag(m1.Tag)); err != nil { + if _, err := ms.Put(ctx, m1, distribution.WithTag(m1.Tag)); err != nil { t.Fatal(err) } diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 8ef7197a..5f2d8855 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -86,7 +86,11 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } - manifest, err = manifests.Get(imh, imh.Digest) + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return @@ -245,7 +249,11 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - _, err = manifests.Put(imh, manifest) + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + _, err = manifests.Put(imh, manifest, options...) if err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. 
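
A minimal sketch of how a client passes a tag through the relocated option
type after the patch above. The repository name, registry URL, and tag below
are illustrative; only distribution.WithTag, distribution.WithTagOption, and
the ManifestService calls come from this change.

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func fetchByTag() error {
	ctx := context.Background()

	// Hypothetical repository name and registry endpoint.
	named, err := reference.WithName("foo/bar")
	if err != nil {
		return err
	}
	repo, err := client.NewRepository(ctx, named, "http://localhost:5000", nil)
	if err != nil {
		return err
	}

	ms, err := repo.Manifests(ctx)
	if err != nil {
		return err
	}

	// The tag rides along as a functional option; because WithTagOption now
	// lives in the distribution package rather than the client package, the
	// registry handlers can read the same option and attach the tag to push
	// and pull event notifications.
	m, err := ms.Get(ctx, "", distribution.WithTag("latest"))
	if err != nil {
		return err
	}

	mediaType, payload, err := m.Payload()
	if err != nil {
		return err
	}
	fmt.Printf("fetched %s manifest (%d bytes)\n", mediaType, len(payload))
	return nil
}
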
From 3dd506d896764c2a5906f4c0b78b0b0b0fb59df4 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 22 Feb 2016 17:49:23 -0800 Subject: [PATCH 445/501] Enable URLs returned from the registry to be configured as relative. Signed-off-by: Richard Scothern --- docs/api/v2/urls.go | 29 +++++--- docs/api/v2/urls_test.go | 140 +++++++++++++++++++++++--------------- docs/client/repository.go | 4 +- docs/handlers/api_test.go | 122 +++++++++++++++++++++++++++------ docs/handlers/app.go | 4 +- docs/handlers/app_test.go | 2 +- 6 files changed, 210 insertions(+), 91 deletions(-) diff --git a/docs/api/v2/urls.go b/docs/api/v2/urls.go index 408c7b74..a959aaa8 100644 --- a/docs/api/v2/urls.go +++ b/docs/api/v2/urls.go @@ -17,33 +17,35 @@ import ( // under "/foo/v2/...". Most application will only provide a schema, host and // port, such as "https://localhost:5000/". type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router + root *url.URL // url root (ie http://localhost/) + router *mux.Router + relative bool } // NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL) *URLBuilder { +func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { return &URLBuilder{ - root: root, - router: Router(), + root: root, + router: Router(), + relative: relative, } } // NewURLBuilderFromString workes identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. -func NewURLBuilderFromString(root string) (*URLBuilder, error) { +func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } - return NewURLBuilder(u), nil + return NewURLBuilder(u, relative), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. -func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { +func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { var scheme string forwardedProto := r.Header.Get("X-Forwarded-Proto") @@ -85,7 +87,7 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { u.Path = requestPath[0 : index+1] } - return NewURLBuilder(u) + return NewURLBuilder(u, relative) } // BuildBaseURL constructs a base url for the API, typically just "/v2/". 
@@ -194,12 +196,13 @@ func (ub *URLBuilder) cloneRoute(name string) clonedRoute { *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root - return clonedRoute{Route: route, root: root} + return clonedRoute{Route: route, root: root, relative: ub.relative} } type clonedRoute struct { *mux.Route - root *url.URL + root *url.URL + relative bool } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { @@ -208,6 +211,10 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { return nil, err } + if cr.relative { + return routeURL, nil + } + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { routeURL.Path = routeURL.Path[1:] } diff --git a/docs/api/v2/urls_test.go b/docs/api/v2/urls_test.go index 1af1f261..10aadd52 100644 --- a/docs/api/v2/urls_test.go +++ b/docs/api/v2/urls_test.go @@ -92,25 +92,31 @@ func TestURLBuilder(t *testing.T) { "https://localhost:5443", } - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + t.Fatalf("unexpected error creating urlbuilder: %v", err) } - expectedURL := root + testCase.expectedPath + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root + expectedURL + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } } } } + doTest(true) + doTest(false) } func TestURLBuilderWithPrefix(t *testing.T) { @@ -121,25 +127,31 @@ func TestURLBuilderWithPrefix(t *testing.T) { "https://localhost:5443/prefix/", } - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + t.Fatalf("unexpected error creating urlbuilder: %v", err) } - expectedURL := root[0:len(root)-1] + testCase.expectedPath + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root[0:len(root)-1] + expectedURL + } + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } } } } + doTest(true) + doTest(false) } type builderFromRequestTestCase struct { @@ -197,39 +209,48 @@ func TestBuilderFromRequest(t *testing.T) { }, }, } - - for _, tr := range testRequests { - var builder *URLBuilder - if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = 
NewURLBuilder(&tr.configHost) - } else { - builder = NewURLBuilderFromRequest(tr.request) - } - - for _, testCase := range makeURLBuilderTestCases(builder) { - buildURL, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - var expectedURL string - proto, ok := tr.request.Header["X-Forwarded-Proto"] - if !ok { - expectedURL = tr.base + testCase.expectedPath + doTest := func(relative bool) { + for _, tr := range testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, relative) } else { - urlBase, err := url.Parse(tr.base) - if err != nil { - t.Fatal(err) - } - urlBase.Scheme = proto[0] - expectedURL = urlBase.String() + testCase.expectedPath + builder = NewURLBuilderFromRequest(tr.request, relative) } - if buildURL != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL + } + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String() + expectedURL + } + } + + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) + } } } } + doTest(true) + doTest(false) } func TestBuilderFromRequestWithPrefix(t *testing.T) { @@ -270,12 +291,13 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { }, } + var relative bool for _, tr := range testRequests { var builder *URLBuilder if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost) + builder = NewURLBuilder(&tr.configHost, false) } else { - builder = NewURLBuilderFromRequest(tr.request) + builder = NewURLBuilderFromRequest(tr.request, false) } for _, testCase := range makeURLBuilderTestCases(builder) { @@ -283,17 +305,25 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } + var expectedURL string proto, ok := tr.request.Header["X-Forwarded-Proto"] if !ok { - expectedURL = tr.base[0:len(tr.base)-1] + testCase.expectedPath + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base[0:len(tr.base)-1] + expectedURL + } } else { urlBase, err := url.Parse(tr.base) if err != nil { t.Fatal(err) } urlBase.Scheme = proto[0] - expectedURL = urlBase.String()[0:len(urlBase.String())-1] + testCase.expectedPath + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL + } + } if buildURL != expectedURL { diff --git a/docs/client/repository.go b/docs/client/repository.go index 936a3f1b..ca4048db 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -62,7 +62,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error { // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) + ub, err := 
v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } @@ -133,7 +133,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri // NewRepository creates a new Repository for the given repository name and base URL. func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index d6488869..523ecca2 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -43,7 +43,6 @@ var headerConfig = http.Header{ // 200 OK response. func TestCheckAPI(t *testing.T) { env := newTestEnv(t, false) - baseURL, err := env.builder.BuildBaseURL() if err != nil { t.Fatalf("unexpected error building base url: %v", err) @@ -294,6 +293,79 @@ func TestBlobDelete(t *testing.T) { testBlobDelete(t, env, args) } +func TestRelativeURL(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + config.HTTP.Headers = headerConfig + config.HTTP.RelativeURLs = false + env := newTestEnvWithConfig(t, &config) + ref, _ := reference.WithName("foo/bar") + uploadURLBaseAbs, _ := startPushLayer(t, env, ref) + + u, err := url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + args := makeBlobArgs(t) + resp, err := doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } + + config.HTTP.RelativeURLs = true + args = makeBlobArgs(t) + uploadURLBaseRelative, _ := startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseRelative) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Absolute URL returned from blob upload chunk with relative configuration") + } + + // Start a new upload in absolute mode to get a valid base URL + config.HTTP.RelativeURLs = false + uploadURLBaseAbs, _ = startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + // Complete upload with relative URLs enabled to ensure the final location is relative + config.HTTP.RelativeURLs = true + resp, err = doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } +} + func TestBlobDeleteDisabled(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) @@ -349,7 +421,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // 
------------------------------------------ // Start an upload, check the status then cancel - uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID := startPushLayer(t, env, imageName) // A status check should work resp, err = http.Get(uploadURLBase) @@ -384,7 +456,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ----------------------------------------- // Do layer push with an empty body and different digest - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error doing bad layer push: %v", err) @@ -400,7 +472,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { t.Fatalf("unexpected error digesting empty buffer: %v", err) } - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) // ----------------------------------------- @@ -413,7 +485,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { t.Fatalf("unexpected error digesting empty tar: %v", err) } - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) // ------------------------------------------ @@ -421,7 +493,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { layerLength, _ := layerFile.Seek(0, os.SEEK_END) layerFile.Seek(0, os.SEEK_SET) - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) // ------------------------------------------ @@ -435,7 +507,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { canonicalDigest := canonicalDigester.Digest() layerFile.Seek(0, 0) - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) @@ -585,7 +657,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { // Reupload previously deleted blob layerFile.Seek(0, os.SEEK_SET) - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) layerFile.Seek(0, os.SEEK_SET) @@ -625,7 +697,7 @@ func TestDeleteDisabled(t *testing.T) { if err != nil { t.Fatalf("Error building blob URL") } - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) resp, err := httpDelete(layerURL) @@ -651,7 +723,7 @@ func TestDeleteReadOnly(t *testing.T) { if err != nil { t.Fatalf("Error building blob URL") } - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, 
uploadURLBase, layerFile) env.app.readOnly = true @@ -871,7 +943,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } @@ -1177,7 +1249,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name }`) sampleConfigDigest := digest.FromBytes(sampleConfig) - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) manifest.Config.Digest = sampleConfigDigest manifest.Config.Size = int64(len(sampleConfig)) @@ -1210,7 +1282,7 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name expectedLayers[dgst] = rs manifest.Layers[i].Digest = dgst - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } @@ -1842,7 +1914,7 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te app := NewApp(ctx, config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) + builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) if err != nil { t.Fatalf("error creating url builder: %v", err) @@ -1904,21 +1976,33 @@ func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *htt return resp } -func startPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named) (location string, uuid string) { - layerUploadURL, err := ub.BuildBlobUploadURL(name) +func startPushLayer(t *testing.T, env *testEnv, name reference.Named) (location string, uuid string) { + layerUploadURL, err := env.builder.BuildBlobUploadURL(name) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) } + u, err := url.Parse(layerUploadURL) + if err != nil { + t.Fatalf("error parsing layer upload URL: %v", err) + } + + base, err := url.Parse(env.server.URL) + if err != nil { + t.Fatalf("error parsing server URL: %v", err) + } + + layerUploadURL = base.ResolveReference(u).String() resp, err := http.Post(layerUploadURL, "", nil) if err != nil { t.Fatalf("unexpected error starting layer push: %v", err) } + defer resp.Body.Close() checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name.String()), resp, http.StatusAccepted) - u, err := url.Parse(resp.Header.Get("Location")) + u, err = url.Parse(resp.Header.Get("Location")) if err != nil { t.Fatalf("error parsing location header: %v", err) } @@ -1943,7 +2027,6 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst dig u.RawQuery = url.Values{ "_state": u.Query()["_state"], - "digest": []string{dgst.String()}, }.Encode() @@ -2211,8 +2294,7 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst - - uploadURLBase, _ := startPushLayer(t, env.builder, imageNameRef) + uploadURLBase, _ := startPushLayer(t, env, imageNameRef) pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 2a60001f..3c3e50d0 
100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go
@@ -721,9 +721,9 @@ func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
 // A "host" item in the configuration takes precedence over
 // X-Forwarded-Proto and X-Forwarded-Host headers, and the
 // hostname in the request.
- context.urlBuilder = v2.NewURLBuilder(&app.httpHost)
+ context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false)
 } else {
- context.urlBuilder = v2.NewURLBuilderFromRequest(r)
+ context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs)
 }
 return context
diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index b9e9d312..caa7ab97 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go
@@ -160,7 +160,7 @@ func TestNewApp(t *testing.T) {
 app := NewApp(ctx, &config)
 server := httptest.NewServer(app)
- builder, err := v2.NewURLBuilderFromString(server.URL)
+ builder, err := v2.NewURLBuilderFromString(server.URL, false)
 if err != nil {
 t.Fatalf("error creating urlbuilder: %v", err)
 }
From d52cbf923ce982e80d0263336ffdb4cc12510d41 Mon Sep 17 00:00:00 2001 From: Tony Holdstock-Brown Date: Thu, 24 Mar 2016 11:33:01 -0700 Subject: [PATCH 446/501] utilize config log format within gc Signed-off-by: Tony Holdstock-Brown --- docs/garbagecollect.go | 13 +++++++++---- docs/garbagecollect_test.go | 6 +++--- 2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go index 5e165aea..add25a73 100644 --- a/docs/garbagecollect.go +++ b/docs/garbagecollect.go
@@ -17,9 +17,7 @@ import (
 "github.com/spf13/cobra"
 )
-func markAndSweep(storageDriver driver.StorageDriver) error {
- ctx := context.Background()
-
+func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error {
 // Construct a registry
 registry, err := storage.NewRegistry(ctx, storageDriver)
 if err != nil {
@@ -141,7 +139,14 @@ var GCCmd = &cobra.Command{
 os.Exit(1)
 }
- err = markAndSweep(driver)
+ ctx := context.Background()
+ ctx, err = configureLogging(ctx, config)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
+ os.Exit(1)
+ }
+
+ err = markAndSweep(ctx, driver)
 if err != nil {
 fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
 os.Exit(1)
diff --git a/docs/garbagecollect_test.go b/docs/garbagecollect_test.go index 951a9e81..6096e758 100644 --- a/docs/garbagecollect_test.go +++ b/docs/garbagecollect_test.go
@@ -161,7 +161,7 @@ func TestNoDeletionNoEffect(t *testing.T) {
 }
 // Run GC
- err = markAndSweep(inmemoryDriver)
+ err = markAndSweep(context.Background(), inmemoryDriver)
 if err != nil {
 t.Fatalf("Failed mark and sweep: %v", err)
 }
@@ -193,7 +193,7 @@ func TestDeletionHasEffect(t *testing.T) {
 manifests.Delete(ctx, image3.manifestDigest)
 // Run GC
- err = markAndSweep(inmemoryDriver)
+ err = markAndSweep(context.Background(), inmemoryDriver)
 if err != nil {
 t.Fatalf("Failed mark and sweep: %v", err)
 }
@@ -327,7 +327,7 @@ func TestOrphanBlobDeleted(t *testing.T) {
 uploadRandomSchema2Image(t, repo)
 // Run GC
- err = markAndSweep(inmemoryDriver)
+ err = markAndSweep(context.Background(), inmemoryDriver)
 if err != nil {
 t.Fatalf("Failed mark and sweep: %v", err)
 }
From 0f09bcd16a0ed5aa87c4ad84f033e9be3acaa138 Mon Sep 17 00:00:00 2001 From: Aaron Schlesinger Date: Fri, 18 Mar 2016 16:28:42 -0700 Subject: [PATCH 447/501] Add documentation for how to register new StorageDrivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This
commit adds context-specific documentation on StorageDriver, StorageDriverFactory, and the factory’s Register func, explaining how the internal registration mechanism should be used. This documentation follows from the thread starting at https://github.com/deis/builder/pull/262/files#r56720200. cc/ @stevvooe Signed-off-by: Aaron Schlesinger --- docs/storage/driver/factory/factory.go | 11 ++++++++++- docs/storage/driver/storagedriver.go | 9 ++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/factory/factory.go b/docs/storage/driver/factory/factory.go index e84f0026..a9c04ec5 100644 --- a/docs/storage/driver/factory/factory.go +++ b/docs/storage/driver/factory/factory.go @@ -11,7 +11,14 @@ import ( var driverFactories = make(map[string]StorageDriverFactory) // StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces -// Storage drivers should call Register() with a factory to make the driver available by name +// Storage drivers should call Register() with a factory to make the driver available by name. +// Individual StorageDriver implementations generally register with the factory via the Register +// func (below) in their init() funcs, and as such they should be imported anonymously before use. +// See below for an example of how to register and get a StorageDriver for S3 +// +// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams type StorageDriverFactory interface { // Create returns a new storagedriver.StorageDriver with the given parameters // Parameters will vary by driver and may be ignored @@ -21,6 +28,8 @@ type StorageDriverFactory interface { // Register makes a storage driver available by the provided name. // If Register is called twice with the same name or if driver factory is nil, it panics. +// Additionally, it is not concurrency safe. Most Storage Drivers call this function +// in their init() functions. See the documentation for StorageDriverFactory for more. func Register(name string, factory StorageDriverFactory) { if factory == nil { panic("Must not provide nil StorageDriverFactory") diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index 2ae9a67e..c27e1031 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -34,7 +34,14 @@ func (version Version) Minor() uint { const CurrentVersion Version = "0.1" // StorageDriver defines methods that a Storage Driver must implement for a -// filesystem-like key/value object storage. +// filesystem-like key/value object storage. Storage Drivers are automatically +// registered via an internal registration mechanism, and generally created +// via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory). +// See below for an example of how to get a StorageDriver for S3: +// +// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams type StorageDriver interface { // Name returns the human-readable "name" of the driver, useful in error // messages and logging. 
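The two-line example in the doc comment above expands into the following self-contained sketch. The parameter keys are illustrative placeholders; real keys and values depend on the driver being created:

    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/registry/storage/driver/factory"

    	// Imported anonymously so its init() registers the "s3" factory.
    	_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
    )

    func main() {
    	// Parameters vary by driver and may be ignored; these keys are
    	// placeholders, not an authoritative list.
    	params := map[string]interface{}{
    		"region": "us-west-1",
    		"bucket": "my-bucket",
    	}

    	d, err := factory.Create("s3", params)
    	if err != nil {
    		fmt.Println("could not construct driver:", err)
    		return
    	}
    	fmt.Println("constructed storage driver:", d.Name())
    }

Because registration happens in init() and is not concurrency safe, the anonymous import is the whole mechanism: without it, Create("s3", ...) fails because no factory is registered under that name.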
By convention, this will just be the registration From 091ad89197b7b0c22e04e0aac1749e2ca4218b43 Mon Sep 17 00:00:00 2001 From: Aaron Schlesinger Date: Thu, 24 Mar 2016 09:35:04 -0700 Subject: [PATCH 448/501] Remove the example Instead, direct users to the one in the factory package Signed-off-by: Aaron Schlesinger --- docs/storage/driver/storagedriver.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/storage/driver/storagedriver.go b/docs/storage/driver/storagedriver.go index c27e1031..548a17d8 100644 --- a/docs/storage/driver/storagedriver.go +++ b/docs/storage/driver/storagedriver.go @@ -37,11 +37,8 @@ const CurrentVersion Version = "0.1" // filesystem-like key/value object storage. Storage Drivers are automatically // registered via an internal registration mechanism, and generally created // via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory). -// See below for an example of how to get a StorageDriver for S3: -// -// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" -// s3Driver, err = factory.Create("s3", storageParams) -// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams +// Please see the aforementioned factory package for example code showing how to get an instance +// of a StorageDriver type StorageDriver interface { // Name returns the human-readable "name" of the driver, useful in error // messages and logging. By convention, this will just be the registration From b015bf067648a263a8a2afd60b098fae69e75845 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Wed, 30 Mar 2016 16:04:01 +0200 Subject: [PATCH 449/501] don't swallow errors in Swift driver's GetContent() In 000dec3c6f6e92ec20cb86d1375ec82d2f6062b3, which was only intended to be a refactoring commit, the behavior of this block subtly changed so that unknown types of errors would be swallowed instead of propagated. I noticed this while investigating an error similar to #1539 aka docker/docker#21290. It appears that during GetContent() for a hashstate, the Swift proxy produces an error. Since this error was silently swallowed, an empty []byte is used to restart the hash, then producing the digest of the empty string instead of the layer's digest. This PR will not fix the issue, but it should make the actual error more visible by propagating it into `blobWriter#resumeDigest' and 'blobWriter#validateBlob', respectively. Signed-off-by: Stefan Majewsky --- docs/storage/driver/swift/swift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index c4d5a574..dd322be2 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -280,7 +280,7 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } - return content, nil + return content, err } // PutContent stores the []byte content at a location designated by "path". From 59ef6d2d40a44c6699ad30a890f93f9954984fff Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 30 Mar 2016 11:35:24 -0700 Subject: [PATCH 450/501] garbagecollect: Clean up errors - Clean up error messages - Add a missing error check on the result of blobService.Enumerate. 
Signed-off-by: Aaron Lehmann --- docs/garbagecollect.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go index add25a73..ecb64c98 100644 --- a/docs/garbagecollect.go +++ b/docs/garbagecollect.go
@@ -26,7 +26,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
 if !ok {
- return fmt.Errorf("coercion error: unable to convert Namespace to RepositoryEnumerator")
+ return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator")
 }
 // mark
@@ -49,7 +49,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator)
 if !ok {
- return fmt.Errorf("coercion error: unable to convert ManifestService into ManifestEnumerator")
+ return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator")
 }
 err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
@@ -70,7 +70,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 case *schema1.SignedManifest:
 signaturesGetter, ok := manifestService.(distribution.SignaturesGetter)
 if !ok {
- return fmt.Errorf("coercion error: unable to convert ManifestSErvice into SignaturesGetter")
+ return fmt.Errorf("unable to convert ManifestService into SignaturesGetter")
 }
 signatures, err := signaturesGetter.GetSignatures(ctx, dgst)
 if err != nil {
@@ -106,6 +106,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 }
 return nil
 })
+ if err != nil {
+ return fmt.Errorf("error enumerating blobs: %v", err)
+ }
 // Construct vacuum
 vacuum := storage.NewVacuum(ctx, storageDriver)
From 15e3ffb3f296ff8548216dde820bb17af2bb8d8f Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 23 Mar 2016 16:42:50 -0700 Subject: [PATCH 451/501] Add a --dry-run flag. If enabled, this will print the mark and sweep process without removing any files.
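The diff that follows wires the new flag up through cobra. In isolation, the pattern is a package-level bool bound in init(), roughly like this standalone sketch (the command body is elided; only the flag plumbing is shown):

    package main

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    )

    var dryRun bool

    var gcCmd = &cobra.Command{
    	Use:   "garbage-collect <config>",
    	Short: "deletes layers not referenced by any manifests",
    	Run: func(cmd *cobra.Command, args []string) {
    		if dryRun {
    			fmt.Println("dry run: reporting only, nothing will be removed")
    		}
    		// The mark and sweep passes would run here.
    	},
    }

    func init() {
    	// -d / --dry-run, defaulting to false, as in the change below.
    	gcCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
    }

    func main() {
    	if err := gcCmd.Execute(); err != nil {
    		fmt.Println(err)
    	}
    }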
Signed-off-by: Richard Scothern --- docs/garbagecollect.go | 31 +++++++++++++++++++++++++------ docs/storage/manifeststore.go | 7 +++---- 2 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go index ecb64c98..cfeee078 100644 --- a/docs/garbagecollect.go +++ b/docs/garbagecollect.go
@@ -13,12 +13,16 @@ import (
 "github.com/docker/distribution/registry/storage"
 "github.com/docker/distribution/registry/storage/driver"
 "github.com/docker/distribution/registry/storage/driver/factory"
-
 "github.com/spf13/cobra"
 )
+func emit(ctx context.Context, s string) {
+ if dryRun {
+ context.GetLogger(ctx).Infof("gc: %s", s)
+ }
+}
+
 func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error {
-
 // Construct a registry
 registry, err := storage.NewRegistry(ctx, storageDriver)
 if err != nil {
 return fmt.Errorf("failed to construct registry: %v", err)
@@ -32,6 +36,8 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 // mark
 markSet := make(map[digest.Digest]struct{})
 err = repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
+ emit(ctx, fmt.Sprint(repoName))
+
 var err error
 named, err := reference.ParseNamed(repoName)
 if err != nil {
@@ -53,7 +59,8 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 }
 err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
- // Mark the manifest's blob
+ // Mark the manifest's blo
+ emit(ctx, fmt.Sprintf("%s: adding manifest %s ", repoName, dgst))
 markSet[dgst] = struct{}{}
 manifest, err := manifestService.Get(ctx, dgst)
@@ -64,6 +71,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 descriptors := manifest.References()
 for _, descriptor := range descriptors {
 markSet[descriptor.Digest] = struct{}{}
+ emit(ctx, fmt.Sprintf("%s: marking blob %v", repoName, descriptor))
 }
 switch manifest.(type) {
@@ -77,11 +85,13 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 return fmt.Errorf("failed to get signatures for signed manifest: %v", err)
 }
 for _, signatureDigest := range signatures {
+ emit(ctx, fmt.Sprintf("%s: marking signature %s", repoName, signatureDigest))
 markSet[signatureDigest] = struct{}{}
 }
 break
 case *schema2.DeserializedManifest:
 config := manifest.(*schema2.DeserializedManifest).Config
+ emit(ctx, fmt.Sprintf("%s: marking configuration %s", repoName, config.Digest))
 markSet[config.Digest] = struct{}{}
 break
 }
@@ -113,6 +123,10 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 // Construct vacuum
 vacuum := storage.NewVacuum(ctx, storageDriver)
 for dgst := range deleteSet {
+ if dryRun {
+ emit(ctx, fmt.Sprintf("deleting %s", dgst))
+ continue
+ }
 err = vacuum.RemoveBlob(string(dgst))
 if err != nil {
 return fmt.Errorf("failed to delete blob %s: %v\n", dgst, err)
@@ -122,13 +136,18 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error
 return err
 }
+func init() {
+ GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
+}
+
+var dryRun bool
+
 // GCCmd is the cobra command that corresponds to the garbage-collect subcommand
 var GCCmd = &cobra.Command{
 Use: "garbage-collect ",
- Short: "`garbage-collects` deletes layers not referenced by any manifests",
- Long: "`garbage-collects` deletes layers not referenced by any manifests",
+ Short: "`garbage-collect` deletes layers not referenced by any manifests",
+ Long: "`garbage-collect`
deletes layers not referenced by any manifests", Run: func(cmd *cobra.Command, args []string) { - config, err := resolveConfiguration(args) if err != nil { fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index f3660c98..e0b82309 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -161,16 +161,15 @@ func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest diges return nil, err } - signaturesPath = path.Join(signaturesPath, "sha256") - - signaturePaths, err := ms.blobStore.driver.List(ctx, signaturesPath) + alg := string(digest.SHA256) + signaturePaths, err := ms.blobStore.driver.List(ctx, path.Join(signaturesPath, alg)) if err != nil { return nil, err } var digests []digest.Digest for _, sigPath := range signaturePaths { - sigdigest, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) + sigdigest, err := digest.ParseDigest(alg + ":" + path.Base(sigPath)) if err != nil { // merely found not a digest continue From 31ece3d3b68875f0bb884deaef28833689536733 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Thu, 24 Mar 2016 16:03:25 -0700 Subject: [PATCH 452/501] Fix signature handling with GC. If a schema 1 manifest is uploaded with the `disablesignaturestore` option set to true, then no signatures will exist. Handle this case. If a schema 1 manifest is pushed, deleted, garbage collected and pushed again, the repository will contain signature links from the first version, but the blobs will not exist. Disable the signature store in the garbage-collect command so signatures are not fetched. Signed-off-by: Richard Scothern --- docs/garbagecollect.go | 43 ++++++++++++++++++++++------------- docs/garbagecollect_test.go | 6 ++--- docs/storage/manifeststore.go | 12 ++++++++-- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go index cfeee078..8df956b9 100644 --- a/docs/garbagecollect.go +++ b/docs/garbagecollect.go @@ -13,20 +13,18 @@ import ( "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/libtrust" "github.com/spf13/cobra" ) -func emit(ctx context.Context, s string) { +func emit(format string, a ...interface{}) { if dryRun { - context.GetLogger(ctx).Infof("gc: %s", s) + fmt.Printf(format, a...) 
+ fmt.Println("") } } -func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error { - registry, err := storage.NewRegistry(ctx, storageDriver) - if err != nil { - return fmt.Errorf("failed to construct registry: %v", err) - } +func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace) error { repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) if !ok { @@ -35,8 +33,8 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error // mark markSet := make(map[digest.Digest]struct{}) - err = repositoryEnumerator.Enumerate(ctx, func(repoName string) error { - emit(ctx, fmt.Sprint(repoName)) + err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + emit(repoName) var err error named, err := reference.ParseNamed(repoName) @@ -59,8 +57,8 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error } err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { - // Mark the manifest's blo - emit(ctx, fmt.Sprintf("%s: adding manifest %s ", repoName, dgst)) + // Mark the manifest's blob + emit("%s: marking manifest %s ", repoName, dgst) markSet[dgst] = struct{}{} manifest, err := manifestService.Get(ctx, dgst) @@ -71,7 +69,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error descriptors := manifest.References() for _, descriptor := range descriptors { markSet[descriptor.Digest] = struct{}{} - emit(ctx, fmt.Sprintf("%s: marking blob %v", repoName, descriptor)) + emit("%s: marking blob %s", repoName, descriptor.Digest) } switch manifest.(type) { @@ -85,13 +83,13 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error return fmt.Errorf("failed to get signatures for signed manifest: %v", err) } for _, signatureDigest := range signatures { - emit(ctx, fmt.Sprintf("%s: marking signature %s", repoName, signatureDigest)) + emit("%s: marking signature %s", repoName, signatureDigest) markSet[signatureDigest] = struct{}{} } break case *schema2.DeserializedManifest: config := manifest.(*schema2.DeserializedManifest).Config - emit(ctx, fmt.Sprintf("%s: marking configuration %s", repoName, config.Digest)) + emit("%s: marking configuration %s", repoName, config.Digest) markSet[config.Digest] = struct{}{} break } @@ -120,11 +118,12 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver) error return fmt.Errorf("error enumerating blobs: %v", err) } + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) // Construct vacuum vacuum := storage.NewVacuum(ctx, storageDriver) for dgst := range deleteSet { if dryRun { - emit(ctx, fmt.Sprintf("deleting %s", dgst)) + emit("deleting %s", dgst) continue } err = vacuum.RemoveBlob(string(dgst)) @@ -168,7 +167,19 @@ var GCCmd = &cobra.Command{ os.Exit(1) } - err = markAndSweep(ctx, driver) + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + fmt.Fprintf(os.Stderr, "%s", err) + os.Exit(1) + } + + registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) + os.Exit(1) + } + + err = markAndSweep(ctx, driver, registry) if err != nil { fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) os.Exit(1) diff --git a/docs/garbagecollect_test.go b/docs/garbagecollect_test.go index 6096e758..dd5fadd5 100644 --- a/docs/garbagecollect_test.go +++ 
b/docs/garbagecollect_test.go @@ -161,7 +161,7 @@ func TestNoDeletionNoEffect(t *testing.T) { } // Run GC - err = markAndSweep(context.Background(), inmemoryDriver) + err = markAndSweep(context.Background(), inmemoryDriver, registry) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } @@ -193,7 +193,7 @@ func TestDeletionHasEffect(t *testing.T) { manifests.Delete(ctx, image3.manifestDigest) // Run GC - err = markAndSweep(context.Background(), inmemoryDriver) + err = markAndSweep(context.Background(), inmemoryDriver, registry) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } @@ -327,7 +327,7 @@ func TestOrphanBlobDeleted(t *testing.T) { uploadRandomSchema2Image(t, repo) // Run GC - err = markAndSweep(context.Background(), inmemoryDriver) + err = markAndSweep(context.Background(), inmemoryDriver, registry) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index e0b82309..5a9165f9 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver" ) // A ManifestHandler gets and puts manifests of a particular type. @@ -161,13 +162,20 @@ func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest diges return nil, err } + var digests []digest.Digest alg := string(digest.SHA256) signaturePaths, err := ms.blobStore.driver.List(ctx, path.Join(signaturesPath, alg)) - if err != nil { + + switch err.(type) { + case nil: + break + case driver.PathNotFoundError: + // Manifest may have been pushed with signature store disabled + return digests, nil + default: return nil, err } - var digests []digest.Digest for _, sigPath := range signaturePaths { sigdigest, err := digest.ParseDigest(alg + ":" + path.Base(sigPath)) if err != nil { From 3d4b652b589e060439d60f9ab84f6a3676399228 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 29 Mar 2016 10:47:22 -0700 Subject: [PATCH 453/501] Update the gc documentation. Signed-off-by: Richard Scothern --- docs/garbagecollect.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go index 8df956b9..1be4546d 100644 --- a/docs/garbagecollect.go +++ b/docs/garbagecollect.go @@ -19,8 +19,7 @@ import ( func emit(format string, a ...interface{}) { if dryRun { - fmt.Printf(format, a...) - fmt.Println("") + fmt.Printf(format+"\n", a...) 
} } @@ -122,8 +121,8 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis // Construct vacuum vacuum := storage.NewVacuum(ctx, storageDriver) for dgst := range deleteSet { + emit("blob eligible for deletion: %s", dgst) if dryRun { - emit("deleting %s", dgst) continue } err = vacuum.RemoveBlob(string(dgst)) @@ -169,7 +168,7 @@ var GCCmd = &cobra.Command{ k, err := libtrust.GenerateECP256PrivateKey() if err != nil { - fmt.Fprintf(os.Stderr, "%s", err) + fmt.Fprint(os.Stderr, err) os.Exit(1) } From 86ca50dfe516f3ac2b6b463b5c546308921c2bfe Mon Sep 17 00:00:00 2001 From: Tony Holdstock-Brown Date: Mon, 4 Apr 2016 17:18:09 -0700 Subject: [PATCH 454/501] Ensure we log io.Copy errors and bytes copied/total in uploads Signed-off-by: Tony Holdstock-Brown --- docs/handlers/helpers.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index fe44f557..b56c1566 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -46,7 +46,11 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr // instead of showing 0 for the HTTP status. responseWriter.WriteHeader(499) - ctxu.GetLogger(context).Error("client disconnected during " + action) + ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{ + "error": err, + "copied": copied, + "contentLength": r.ContentLength, + }, "error", "copied", "contentLength").Error("client disconnected during " + action) return errors.New("client disconnected") default: } From c655241209b18172aee2129957bbf9f460f563e7 Mon Sep 17 00:00:00 2001 From: Arien Holthuizen Date: Wed, 6 Apr 2016 13:34:14 +0200 Subject: [PATCH 455/501] Only check validity of S3 region if not using custom endpoint Signed-off-by: Arien Holthuizen --- docs/storage/driver/s3-aws/s3.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index 8683f80e..f7facb61 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -136,14 +136,21 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { secretKey = "" } + regionEndpoint := parameters["regionendpoint"] + if regionEndpoint == nil { + regionEndpoint = "" + } + regionName, ok := parameters["region"] if regionName == nil || fmt.Sprint(regionName) == "" { return nil, fmt.Errorf("No region parameter provided") } region := fmt.Sprint(regionName) - _, ok = validRegions[region] - if !ok { - return nil, fmt.Errorf("Invalid region provided: %v", region) + // Don't check the region value if a custom endpoint is provided. 
+ if regionEndpoint == "" { + if _, ok = validRegions[region]; !ok { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } } bucket := parameters["bucket"] @@ -151,11 +158,6 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("No bucket parameter provided") } - regionEndpoint := parameters["regionendpoint"] - if regionEndpoint == nil { - regionEndpoint = "" - } - encryptBool := false encrypt := parameters["encrypt"] switch encrypt := encrypt.(type) { From 84aa48b56cf0acb29a3873e430c9e00d4c2027c1 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Tue, 5 Apr 2016 16:46:39 +0200 Subject: [PATCH 456/501] detect outdated container listings during Stat() and getAllSegments() Signed-off-by: Stefan Majewsky --- docs/storage/driver/swift/swift.go | 84 +++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 20 deletions(-) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index dd322be2..91384828 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -335,7 +335,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged if err != nil { return nil, err } - if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, segmentPath(segmentsPath, len(segments))); err != nil { + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil { return nil, err } segments = []swift.Object{info} @@ -376,23 +376,26 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, fi.IsDir = true return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } else if obj.Name == swiftPath { - // On Swift 1.12, the 'bytes' field is always 0 - // so we need to do a second HEAD request - info, _, err := d.Conn.Object(d.Container, swiftPath) - if err != nil { - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - fi.IsDir = false - fi.Size = info.Bytes - fi.ModTime = info.LastModified - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + // The file exists. But on Swift 1.12, the 'bytes' field is always 0 so + // we need to do a separate HEAD request. + break } } - return nil, storagedriver.PathNotFoundError{Path: path} + //Don't trust an empty `objects` slice. A container listing can be + //outdated. For files, we can make a HEAD request on the object which + //reports existence (at least) much more reliably. + info, _, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the given path. 
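The essence of the Stat() change above is to treat the container listing as a hint and to confirm (non-)existence with a HEAD request on the object itself, since listings in Swift are only eventually consistent. Reduced to a sketch using the same ncw/swift client this driver is built on:

    package swiftutil

    import "github.com/ncw/swift"

    // existsReliably reports whether an object exists. An empty or stale
    // container listing cannot be trusted, so it issues a HEAD request on
    // the object, which reports existence much more reliably.
    func existsReliably(conn *swift.Connection, container, objectPath string) (bool, error) {
    	_, _, err := conn.Object(container, objectPath)
    	if err == swift.ObjectNotFound {
    		return false, nil
    	}
    	if err != nil {
    		return false, err
    	}
    	return true, nil
    }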
@@ -589,11 +592,52 @@ func (d *driver) swiftSegmentPath(path string) (string, error) { } func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + //a simple container listing works 99.9% of the time segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //build a lookup table by object name + hasObjectName := make(map[string]struct{}) + for _, segment := range segments { + hasObjectName[segment.Name] = struct{}{} + } + + //The container listing might be outdated (i.e. not contain all existing + //segment objects yet) because of temporary inconsistency (Swift is only + //eventually consistent!). Check its completeness. + segmentNumber := 0 + for { + segmentNumber++ + segmentPath := getSegmentPath(path, segmentNumber) + + if _, seen := hasObjectName[segmentPath]; seen { + continue + } + + //This segment is missing in the container listing. Use a more reliable + //request to check its existence. (HEAD requests on segments are + //guaranteed to return the correct metadata, except for the pathological + //case of an outage of large parts of the Swift cluster or its network, + //since every segment is only written once.) + segment, _, err := d.Conn.Object(d.Container, segmentPath) + switch err { + case nil: + //found new segment -> keep going, more might be missing + segments = append(segments, segment) + continue + case swift.ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. + return segments, nil + default: + return nil, err //unexpected error + } } - return segments, err } func (d *driver) createManifest(path string, segments string) error { @@ -632,7 +676,7 @@ func generateSecret() (string, error) { return hex.EncodeToString(secretBytes[:]), nil } -func segmentPath(segmentsPath string, partNumber int) string { +func getSegmentPath(segmentsPath string, partNumber int) string { return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) } @@ -769,7 +813,7 @@ func (sw *segmentWriter) Write(p []byte) (int, error) { if offset+chunkSize > len(p) { chunkSize = len(p) - offset } - _, err := sw.conn.ObjectPut(sw.container, segmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) + _, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) if err != nil { return n, err } From 63fe2d1429d8908b8b8abb59acf0cf887a662dbb Mon Sep 17 00:00:00 2001 From: Nikita Date: Wed, 13 Apr 2016 19:32:10 +0400 Subject: [PATCH 457/501] Update swift.go Signed-off-by: Nikita Tarasov --- docs/storage/driver/swift/swift.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 91384828..4c115030 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -69,6 +69,7 @@ type Parameters struct { DomainID string TrustID string Region string + AuthVersion int Container string Prefix string InsecureSkipVerify bool @@ -174,6 +175,7 @@ func New(params Parameters) (*Driver, error) { ApiKey: params.Password, AuthUrl: params.AuthURL, Region: params.Region, + AuthVersion: params.AuthVersion, UserAgent: 
"distribution/" + version.Version, Tenant: params.Tenant, TenantId: params.TenantID, From b55719daaac8f12f6f937a2bd60fca72b354b00e Mon Sep 17 00:00:00 2001 From: Nikita Date: Wed, 13 Apr 2016 19:37:45 +0400 Subject: [PATCH 458/501] test Signed-off-by: Nikita Tarasov --- docs/storage/driver/swift/swift_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index b2ff6001..bffd54e8 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -33,6 +33,7 @@ func init() { trustID string container string region string + AuthVersion int insecureSkipVerify bool secretKey string accessKey string @@ -52,6 +53,7 @@ func init() { trustID = os.Getenv("SWIFT_TRUST_ID") container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") + AuthVersion = os.Getenv("SWIFT_AUTH_VERSION") insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) secretKey = os.Getenv("SWIFT_SECRET_KEY") accessKey = os.Getenv("SWIFT_ACCESS_KEY") @@ -85,6 +87,7 @@ func init() { domainID, trustID, region, + AuthVersion, container, root, insecureSkipVerify, From 007af250b4fe27b624f191add68fe0bd42d58538 Mon Sep 17 00:00:00 2001 From: Nikita Tarasov Date: Wed, 13 Apr 2016 19:06:33 +0300 Subject: [PATCH 459/501] fix test Signed-off-by: Nikita Tarasov Signed-off-by: Nikita Tarasov --- docs/storage/driver/swift/swift_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index bffd54e8..b4f1c738 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -53,7 +53,7 @@ func init() { trustID = os.Getenv("SWIFT_TRUST_ID") container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") - AuthVersion = os.Getenv("SWIFT_AUTH_VERSION") + AuthVersion = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) secretKey = os.Getenv("SWIFT_SECRET_KEY") accessKey = os.Getenv("SWIFT_ACCESS_KEY") From 346bfed9079b8b0c07b88273c9518ee824f5096e Mon Sep 17 00:00:00 2001 From: Nikita Tarasov Date: Sun, 17 Apr 2016 20:05:51 +0300 Subject: [PATCH 460/501] docs + fix test Signed-off-by: Nikita Tarasov --- docs/storage/driver/swift/swift_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index b4f1c738..655aa996 100644 --- a/docs/storage/driver/swift/swift_test.go +++ b/docs/storage/driver/swift/swift_test.go @@ -53,7 +53,7 @@ func init() { trustID = os.Getenv("SWIFT_TRUST_ID") container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") - AuthVersion = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) + AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) secretKey = os.Getenv("SWIFT_SECRET_KEY") accessKey = os.Getenv("SWIFT_ACCESS_KEY") From ea5abc9935d6d9f915f837cdea850268f1df7f29 Mon Sep 17 00:00:00 2001 From: Stefan Majewsky Date: Tue, 19 Apr 2016 13:48:08 +0200 Subject: [PATCH 461/501] wait for DLO segments to show up when Close()ing the writer Not just when Commit()ing the result. This fixes some errors I observed when the layer (i.e. 
the DLO) is Stat()ed immediately after closing, and reports the wrong file size because the container listing is not yet up-to-date. Signed-off-by: Stefan Majewsky --- docs/storage/driver/swift/swift.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index 91384828..0cc037af 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -742,6 +742,9 @@ func (w *writer) Close() error { if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { return err } + if err := w.waitForSegmentsToShowUp(); err != nil { + return err + } } w.closed = true @@ -776,10 +779,14 @@ func (w *writer) Commit() error { } w.committed = true + return w.waitForSegmentsToShowUp() +} +func (w *writer) waitForSegmentsToShowUp() error { var err error waitingTime := readAfterWriteWait endTime := time.Now().Add(readAfterWriteTimeout) + for { var info swift.Object if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { From fdb0fb77df6189794468565e95e79f9f6a97ea3c Mon Sep 17 00:00:00 2001 From: jhaohai Date: Thu, 21 Apr 2016 11:51:34 +0800 Subject: [PATCH 462/501] add cn-north-1 to valid check Signed-off-by: jhaohai --- docs/storage/driver/s3-aws/s3.go | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index f7facb61..bfaa6ba9 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -80,6 +80,7 @@ func init() { "ap-northeast-1", "ap-northeast-2", "sa-east-1", + "cn-north-1", } { validRegions[region] = struct{}{} } From a691d82aee9784b83434fb3482ac89a4cec381d9 Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Thu, 21 Apr 2016 15:54:48 -0700 Subject: [PATCH 463/501] add middleware storage driver for redirect Signed-off-by: Andrew Hsu (github: andrewhsu) --- .../driver/middleware/redirect/middleware.go | 47 ++++++++++++++ .../middleware/redirect/middleware_test.go | 62 +++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 docs/storage/driver/middleware/redirect/middleware.go create mode 100644 docs/storage/driver/middleware/redirect/middleware_test.go diff --git a/docs/storage/driver/middleware/redirect/middleware.go b/docs/storage/driver/middleware/redirect/middleware.go new file mode 100644 index 00000000..286a84ab --- /dev/null +++ b/docs/storage/driver/middleware/redirect/middleware.go @@ -0,0 +1,47 @@ +package middleware + +import ( + "fmt" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "net/url" + "strings" +) + +type redirectStorageMiddleware struct { + storagedriver.StorageDriver + scheme string + host string +} + +var _ storagedriver.StorageDriver = &redirectStorageMiddleware{} + +func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + o, ok := options["baseurl"] + if !ok { + return nil, fmt.Errorf("no baseurl provided") + } + b, ok := o.(string) + if !ok { + return nil, fmt.Errorf("baseurl must be a string") + } + if !strings.Contains(b, "://") { + b = "https://" + b + } + u, err := url.Parse(b) + if err != nil { + return nil, fmt.Errorf("invalid baseurl: %v", err) + } + + return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil +} + +func (r 
*redirectStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+ u := &url.URL{Scheme: r.scheme, Host: r.host, Path: path}
+ return u.String(), nil
+}
+
+func init() {
+ storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware))
+}
diff --git a/docs/storage/driver/middleware/redirect/middleware_test.go b/docs/storage/driver/middleware/redirect/middleware_test.go new file mode 100644 index 00000000..31b661b6 --- /dev/null +++ b/docs/storage/driver/middleware/redirect/middleware_test.go
@@ -0,0 +1,62 @@
+package middleware
+
+import (
+ check "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { check.TestingT(t) }
+
+type MiddlewareSuite struct{}
+
+var _ = check.Suite(&MiddlewareSuite{})
+
+func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
+ options := make(map[string]interface{})
+ _, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.ErrorMatches, "no baseurl provided")
+}
+
+func (s *MiddlewareSuite) TestDefaultScheme(c *check.C) {
+ options := make(map[string]interface{})
+ options["baseurl"] = "example.com"
+ middleware, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.Equals, nil)
+
+ m, ok := middleware.(*redirectStorageMiddleware)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(m.scheme, check.Equals, "https")
+ c.Assert(m.host, check.Equals, "example.com")
+}
+
+func (s *MiddlewareSuite) TestHTTPS(c *check.C) {
+ options := make(map[string]interface{})
+ options["baseurl"] = "https://example.com"
+ middleware, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.Equals, nil)
+
+ m, ok := middleware.(*redirectStorageMiddleware)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(m.scheme, check.Equals, "https")
+ c.Assert(m.host, check.Equals, "example.com")
+
+ url, err := middleware.URLFor(nil, "/rick/data", nil)
+ c.Assert(err, check.Equals, nil)
+ c.Assert(url, check.Equals, "https://example.com/rick/data")
+}
+
+func (s *MiddlewareSuite) TestHTTP(c *check.C) {
+ options := make(map[string]interface{})
+ options["baseurl"] = "http://example.com"
+ middleware, err := newRedirectStorageMiddleware(nil, options)
+ c.Assert(err, check.Equals, nil)
+
+ m, ok := middleware.(*redirectStorageMiddleware)
+ c.Assert(ok, check.Equals, true)
+ c.Assert(m.scheme, check.Equals, "http")
+ c.Assert(m.host, check.Equals, "example.com")
+
+ url, err := middleware.URLFor(nil, "morty/data", nil)
+ c.Assert(err, check.Equals, nil)
+ c.Assert(url, check.Equals, "http://example.com/morty/data")
+}
From 6615b77a0903d24a6cccac1ae653eeae8e92c639 Mon Sep 17 00:00:00 2001 From: Serge Dubrouski Date: Thu, 21 Apr 2016 20:04:22 -0600 Subject: [PATCH 464/501] Add blobWriter.Close() call into blobWriter.Commit() Signed-off-by: Serge Dubrouski --- docs/storage/blobwriter.go | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 7f280d36..418df818 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go
@@ -56,6 +56,8 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor)
 return distribution.Descriptor{}, err
 }
+ bw.Close()
+
 canonical, err := bw.validateBlob(ctx, desc)
 if err != nil {
 return distribution.Descriptor{}, err
From d11a979591ce6f6c856366c1edd1bf539b740f39 Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Sat, 23 Apr 2016 11:13:15 +0100 Subject: [PATCH 465/501] Sorting completed parts by part number for better accordance with the S3 spec
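S3 expects the part list passed to CompleteMultipartUpload to be ordered by ascending part number, which is what the sort.Interface implementation in the diff below guarantees. The pattern, reduced to a runnable sketch around the aws-sdk-go types the driver already uses:

    package main

    import (
    	"fmt"
    	"sort"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // completedParts mirrors the type added below: completed parts ordered
    // by ascending part number.
    type completedParts []*s3.CompletedPart

    func (a completedParts) Len() int           { return len(a) }
    func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
    func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }

    func main() {
    	parts := completedParts{
    		{ETag: aws.String("etag-3"), PartNumber: aws.Int64(3)},
    		{ETag: aws.String("etag-1"), PartNumber: aws.Int64(1)},
    		{ETag: aws.String("etag-2"), PartNumber: aws.Int64(2)},
    	}
    	sort.Sort(parts)
    	for _, p := range parts {
    		fmt.Println(*p.PartNumber, *p.ETag) // prints parts 1, 2, 3 in order
    	}
    }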
Signed-off-by: Anis Elleuch --- docs/storage/driver/s3-aws/s3.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index f7facb61..4122a4af 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -18,6 +18,7 @@ import ( "io/ioutil" "net/http" "reflect" + "sort" "strconv" "strings" "time" @@ -718,6 +719,12 @@ func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver } } +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + func (w *writer) Write(p []byte) (int, error) { if w.closed { return 0, fmt.Errorf("already closed") @@ -730,19 +737,22 @@ func (w *writer) Write(p []byte) (int, error) { // If the last written part is smaller than minChunkSize, we need to make a // new multipart upload :sadface: if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { - var completedParts []*s3.CompletedPart + var completedUploadedParts completedParts for _, part := range w.parts { - completedParts = append(completedParts, &s3.CompletedPart{ + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ ETag: part.ETag, PartNumber: part.PartNumber, }) } + + sort.Sort(completedUploadedParts) + _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(w.driver.Bucket), Key: aws.String(w.key), UploadId: aws.String(w.uploadID), MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: completedParts, + Parts: completedUploadedParts, }, }) if err != nil { @@ -882,19 +892,23 @@ func (w *writer) Commit() error { return err } w.committed = true - var completedParts []*s3.CompletedPart + + var completedUploadedParts completedParts for _, part := range w.parts { - completedParts = append(completedParts, &s3.CompletedPart{ + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ ETag: part.ETag, PartNumber: part.PartNumber, }) } + + sort.Sort(completedUploadedParts) + _, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(w.driver.Bucket), Key: aws.String(w.key), UploadId: aws.String(w.uploadID), MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: completedParts, + Parts: completedUploadedParts, }, }) if err != nil { From cec7248bd1578f9f6929c306af20d3dd7cdced64 Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Mon, 25 Apr 2016 09:32:36 -0700 Subject: [PATCH 466/501] separate the go/non-go imports and reorder Signed-off-by: Andrew Hsu (github: andrewhsu) --- docs/storage/driver/middleware/redirect/middleware.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/storage/driver/middleware/redirect/middleware.go b/docs/storage/driver/middleware/redirect/middleware.go index 286a84ab..a806bc0f 100644 --- a/docs/storage/driver/middleware/redirect/middleware.go +++ b/docs/storage/driver/middleware/redirect/middleware.go @@ -2,11 +2,12 @@ package middleware import ( "fmt" + "net/url" + "strings" + "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "net/url" - "strings" ) type redirectStorageMiddleware struct { From fba2e3a206bdc39dbbfb57f3ec252307a720c5b9 
Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Mon, 25 Apr 2016 10:28:32 -0700 Subject: [PATCH 467/501] scheme and host mandatory in baseurl Signed-off-by: Andrew Hsu (github: andrewhsu) --- .../storage/driver/middleware/redirect/middleware.go | 12 +++++++----- .../driver/middleware/redirect/middleware_test.go | 11 +++-------- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/docs/storage/driver/middleware/redirect/middleware.go b/docs/storage/driver/middleware/redirect/middleware.go index a806bc0f..20cd7daa 100644 --- a/docs/storage/driver/middleware/redirect/middleware.go +++ b/docs/storage/driver/middleware/redirect/middleware.go @@ -3,7 +3,6 @@ package middleware import ( "fmt" "net/url" - "strings" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" @@ -27,12 +26,15 @@ func newRedirectStorageMiddleware(sd storagedriver.StorageDriver, options map[st if !ok { return nil, fmt.Errorf("baseurl must be a string") } - if !strings.Contains(b, "://") { - b = "https://" + b - } u, err := url.Parse(b) if err != nil { - return nil, fmt.Errorf("invalid baseurl: %v", err) + return nil, fmt.Errorf("unable to parse redirect baseurl: %s", b) + } + if u.Scheme == "" { + return nil, fmt.Errorf("no scheme specified for redirect baseurl") + } + if u.Host == "" { + return nil, fmt.Errorf("no host specified for redirect baseurl") } return &redirectStorageMiddleware{StorageDriver: sd, scheme: u.Scheme, host: u.Host}, nil diff --git a/docs/storage/driver/middleware/redirect/middleware_test.go b/docs/storage/driver/middleware/redirect/middleware_test.go index 31b661b6..5fffafba 100644 --- a/docs/storage/driver/middleware/redirect/middleware_test.go +++ b/docs/storage/driver/middleware/redirect/middleware_test.go @@ -17,16 +17,11 @@ func (s *MiddlewareSuite) TestNoConfig(c *check.C) { c.Assert(err, check.ErrorMatches, "no baseurl provided") } -func (s *MiddlewareSuite) TestDefaultScheme(c *check.C) { +func (s *MiddlewareSuite) TestMissingScheme(c *check.C) { options := make(map[string]interface{}) options["baseurl"] = "example.com" - middleware, err := newRedirectStorageMiddleware(nil, options) - c.Assert(err, check.Equals, nil) - - m, ok := middleware.(*redirectStorageMiddleware) - c.Assert(ok, check.Equals, true) - c.Assert(m.scheme, check.Equals, "https") - c.Assert(m.host, check.Equals, "example.com") + _, err := newRedirectStorageMiddleware(nil, options) + c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl") } func (s *MiddlewareSuite) TestHTTPS(c *check.C) { From 3336cc13e45a33fdcc5954064f8090d187979380 Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Mon, 25 Apr 2016 11:40:21 -0700 Subject: [PATCH 468/501] modify redirect test to include port Signed-off-by: Andrew Hsu (github: andrewhsu) --- .../storage/driver/middleware/redirect/middleware_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/storage/driver/middleware/redirect/middleware_test.go b/docs/storage/driver/middleware/redirect/middleware_test.go index 5fffafba..82f4a561 100644 --- a/docs/storage/driver/middleware/redirect/middleware_test.go +++ b/docs/storage/driver/middleware/redirect/middleware_test.go @@ -24,20 +24,20 @@ func (s *MiddlewareSuite) TestMissingScheme(c *check.C) { c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl") } -func (s *MiddlewareSuite) TestHTTPS(c *check.C) { +func (s *MiddlewareSuite) TestHttpsPort(c *check.C) { options := make(map[string]interface{}) - 
options["baseurl"] = "https://example.com" + options["baseurl"] = "https://example.com:5443" middleware, err := newRedirectStorageMiddleware(nil, options) c.Assert(err, check.Equals, nil) m, ok := middleware.(*redirectStorageMiddleware) c.Assert(ok, check.Equals, true) c.Assert(m.scheme, check.Equals, "https") - c.Assert(m.host, check.Equals, "example.com") + c.Assert(m.host, check.Equals, "example.com:5443") url, err := middleware.URLFor(nil, "/rick/data", nil) c.Assert(err, check.Equals, nil) - c.Assert(url, check.Equals, "https://example.com/rick/data") + c.Assert(url, check.Equals, "https://example.com:5443/rick/data") } func (s *MiddlewareSuite) TestHTTP(c *check.C) { From 54edbdfee655639ee747135133c78f2cdf427ee7 Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Tue, 26 Apr 2016 14:33:54 -0700 Subject: [PATCH 469/501] separate the go/non-go imports and reorder Signed-off-by: Andrew Hsu (github: andrewhsu) --- docs/storage/driver/middleware/redirect/middleware_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/storage/driver/middleware/redirect/middleware_test.go b/docs/storage/driver/middleware/redirect/middleware_test.go index 82f4a561..1eb6309f 100644 --- a/docs/storage/driver/middleware/redirect/middleware_test.go +++ b/docs/storage/driver/middleware/redirect/middleware_test.go @@ -1,8 +1,9 @@ package middleware import ( - check "gopkg.in/check.v1" "testing" + + check "gopkg.in/check.v1" ) func Test(t *testing.T) { check.TestingT(t) } From a88088a59d590146e6e28867f4078b6d28a0fe51 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Sat, 27 Feb 2016 15:37:07 -0800 Subject: [PATCH 470/501] Regulate filesystem driver to max of 100 calls It's easily possible for a flood of requests to trigger thousands of concurrent file accesses on the storage driver. Each file I/O call creates a new OS thread that is not reaped by the Golang runtime. By limiting it to only 100 at a time we can effectively bound the number of OS threads in use by the storage driver. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) Signed-off-by: Tony Holdstock-Brown --- docs/storage/driver/base/regulator.go | 150 +++++++++++++++++++++++ docs/storage/driver/filesystem/driver.go | 6 +- 2 files changed, 153 insertions(+), 3 deletions(-) create mode 100644 docs/storage/driver/base/regulator.go diff --git a/docs/storage/driver/base/regulator.go b/docs/storage/driver/base/regulator.go new file mode 100644 index 00000000..21ddfe57 --- /dev/null +++ b/docs/storage/driver/base/regulator.go @@ -0,0 +1,150 @@ +package base + +import ( + "io" + "sync" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +type regulator struct { + storagedriver.StorageDriver + sync.Cond + + available uint +} + +// NewRegulator wraps the given driver and is used to regulate concurrent calls +// to the given storage driver to a maximum of the given limit. This is useful +// for storage drivers that would otherwise create an unbounded number of OS +// threads if allowed to be called unregulated. 
+func NewRegulator(driver storagedriver.StorageDriver, limit uint) storagedriver.StorageDriver { + return ®ulator{ + StorageDriver: driver, + Cond: sync.Cond{ + L: &sync.Mutex{}, + }, + available: limit, + } +} + +func (r *regulator) condition() bool { + return r.available > 0 +} + +func (r *regulator) enter() { + r.L.Lock() + defer r.L.Unlock() + + for !r.condition() { + r.Wait() + } + + r.available-- +} + +func (r *regulator) exit() { + r.L.Lock() + defer r.Signal() + defer r.L.Unlock() + + r.available++ +} + +// Name returns the human-readable "name" of the driver, useful in error +// messages and logging. By convention, this will just be the registration +// name, but drivers may provide other information here. +func (r *regulator) Name() string { + r.enter() + defer r.exit() + + return r.StorageDriver.Name() +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.GetContent(ctx, path) +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { + r.enter() + defer r.exit() + + return r.StorageDriver.PutContent(ctx, path, content) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. +func (r *regulator) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.ReadStream(ctx, path, offset) +} + +// WriteStream stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. +func (r *regulator) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { + r.enter() + defer r.exit() + + return r.StorageDriver.WriteStream(ctx, path, offset, reader) +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Stat(ctx, path) +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (r *regulator) List(ctx context.Context, path string) ([]string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.List(ctx, path) +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +// Note: This may be no more efficient than a copy followed by a delete for +// many implementations. +func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Move(ctx, sourcePath, destPath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (r *regulator) Delete(ctx context.Context, path string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Delete(ctx, path) +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. 
+// May return an ErrUnsupportedMethod in certain StorageDriver
+// implementations.
+func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
+	r.enter()
+	defer r.exit()
+
+	return r.StorageDriver.URLFor(ctx, path, options)
+}
diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go
index 3bbdc637..e22e9809 100644
--- a/docs/storage/driver/filesystem/driver.go
+++ b/docs/storage/driver/filesystem/driver.go
@@ -60,12 +60,12 @@ func FromParameters(parameters map[string]interface{}) *Driver {
 
 // New constructs a new Driver with a given rootDirectory
 func New(rootDirectory string) *Driver {
+	fsDriver := &driver{rootDirectory: rootDirectory}
+
 	return &Driver{
 		baseEmbed: baseEmbed{
 			Base: base.Base{
-				StorageDriver: &driver{
-					rootDirectory: rootDirectory,
-				},
+				StorageDriver: base.NewRegulator(fsDriver, 100),
 			},
 		},
 	}
From 8775da93d60e55f5f671909ceca467a2b7906e08 Mon Sep 17 00:00:00 2001
From: Serge Dubrouski
Date: Tue, 26 Apr 2016 19:44:23 -0600
Subject: [PATCH 471/501] Fix wording for dry-run flag in usage message for
 garbage collector.

Signed-off-by: Serge Dubrouski
---
 docs/garbagecollect.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go
index 1be4546d..7e1d97d9 100644
--- a/docs/garbagecollect.go
+++ b/docs/garbagecollect.go
@@ -135,7 +135,7 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
 }
 
 func init() {
-	GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything expect remove the blobs")
+	GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
 }
 
 var dryRun bool
From 898fdb48a1f694b4d317ad08e74d37254a5addfc Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Mon, 25 Apr 2016 21:14:00 -0700
Subject: [PATCH 472/501] Ensure GC continues marking if _manifests is
 nonexistent

Signed-off-by: Tony Holdstock-Brown
---
 docs/garbagecollect.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/docs/garbagecollect.go b/docs/garbagecollect.go
index 7e1d97d9..65d432e0 100644
--- a/docs/garbagecollect.go
+++ b/docs/garbagecollect.go
@@ -96,6 +96,17 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
 		return nil
 	})
 
+	if err != nil {
+		// In certain situations such as unfinished uploads, deleting all
+		// tags in S3 or removing the _manifests folder manually, this
+		// error may be of type PathNotFound.
+		//
+		// In these cases we can continue marking other manifests safely.
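+		//
+		// (Only the driver's typed PathNotFoundError is swallowed by the
+		// comma-ok type assertion below; any other error is still returned
+		// as a failure of the mark stage.)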
+ if _, ok := err.(driver.PathNotFoundError); ok { + return nil + } + } + return err }) From 3a034b477e827559fe72c0a01bed12f2f758488c Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 27 Apr 2016 11:49:01 -0700 Subject: [PATCH 473/501] Move garbage collect code into storage package Signed-off-by: Richard Scothern --- docs/root.go | 56 +++++++++++++ docs/{ => storage}/garbagecollect.go | 96 ++++++----------------- docs/{ => storage}/garbagecollect_test.go | 11 ++- 3 files changed, 85 insertions(+), 78 deletions(-) rename docs/{ => storage}/garbagecollect.go (60%) rename docs/{ => storage}/garbagecollect_test.go (96%) diff --git a/docs/root.go b/docs/root.go index 46338b46..7a7d44cb 100644 --- a/docs/root.go +++ b/docs/root.go @@ -1,7 +1,14 @@ package registry import ( + "fmt" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" + "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/version" + "github.com/docker/libtrust" "github.com/spf13/cobra" ) @@ -10,6 +17,7 @@ var showVersion bool func init() { RootCmd.AddCommand(ServeCmd) RootCmd.AddCommand(GCCmd) + GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") } @@ -26,3 +34,51 @@ var RootCmd = &cobra.Command{ cmd.Usage() }, } + +var dryRun bool + +// GCCmd is the cobra command that corresponds to the garbage-collect subcommand +var GCCmd = &cobra.Command{ + Use: "garbage-collect ", + Short: "`garbage-collect` deletes layers not referenced by any manifests", + Long: "`garbage-collect` deletes layers not referenced by any manifests", + Run: func(cmd *cobra.Command, args []string) { + config, err := resolveConfiguration(args) + if err != nil { + fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) + cmd.Usage() + os.Exit(1) + } + + driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) + os.Exit(1) + } + + ctx := context.Background() + ctx, err = configureLogging(ctx, config) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) + os.Exit(1) + } + + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + + registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) + os.Exit(1) + } + + err = storage.MarkAndSweep(ctx, driver, registry, dryRun) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) + os.Exit(1) + } + }, +} diff --git a/docs/garbagecollect.go b/docs/storage/garbagecollect.go similarity index 60% rename from docs/garbagecollect.go rename to docs/storage/garbagecollect.go index 65d432e0..be64b847 100644 --- a/docs/garbagecollect.go +++ b/docs/storage/garbagecollect.go @@ -1,8 +1,7 @@ -package registry +package storage import ( "fmt" - "os" "github.com/docker/distribution" "github.com/docker/distribution/context" @@ -10,21 +9,15 @@ import ( "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/driver" - 
"github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/libtrust" - "github.com/spf13/cobra" ) func emit(format string, a ...interface{}) { - if dryRun { - fmt.Printf(format+"\n", a...) - } + fmt.Printf(format+"\n", a...) } -func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace) error { - +// MarkAndSweep performs a mark and sweep of registry data +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) if !ok { return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") @@ -33,7 +26,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis // mark markSet := make(map[digest.Digest]struct{}) err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { - emit(repoName) + if dryRun { + emit(repoName) + } var err error named, err := reference.ParseNamed(repoName) @@ -57,7 +52,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { // Mark the manifest's blob - emit("%s: marking manifest %s ", repoName, dgst) + if dryRun { + emit("%s: marking manifest %s ", repoName, dgst) + } markSet[dgst] = struct{}{} manifest, err := manifestService.Get(ctx, dgst) @@ -68,7 +65,9 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis descriptors := manifest.References() for _, descriptor := range descriptors { markSet[descriptor.Digest] = struct{}{} - emit("%s: marking blob %s", repoName, descriptor.Digest) + if dryRun { + emit("%s: marking blob %s", repoName, descriptor.Digest) + } } switch manifest.(type) { @@ -82,13 +81,17 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis return fmt.Errorf("failed to get signatures for signed manifest: %v", err) } for _, signatureDigest := range signatures { - emit("%s: marking signature %s", repoName, signatureDigest) + if dryRun { + emit("%s: marking signature %s", repoName, signatureDigest) + } markSet[signatureDigest] = struct{}{} } break case *schema2.DeserializedManifest: config := manifest.(*schema2.DeserializedManifest).Config - emit("%s: marking configuration %s", repoName, config.Digest) + if dryRun { + emit("%s: marking configuration %s", repoName, config.Digest) + } markSet[config.Digest] = struct{}{} break } @@ -127,13 +130,14 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis if err != nil { return fmt.Errorf("error enumerating blobs: %v", err) } - - emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + if dryRun { + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + } // Construct vacuum - vacuum := storage.NewVacuum(ctx, storageDriver) + vacuum := NewVacuum(ctx, storageDriver) for dgst := range deleteSet { - emit("blob eligible for deletion: %s", dgst) if dryRun { + emit("blob eligible for deletion: %s", dgst) continue } err = vacuum.RemoveBlob(string(dgst)) @@ -144,55 +148,3 @@ func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis return err } - -func init() { - GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") -} - -var dryRun bool - -// GCCmd is the cobra command that corresponds to the garbage-collect subcommand -var GCCmd = 
&cobra.Command{ - Use: "garbage-collect ", - Short: "`garbage-collect` deletes layers not referenced by any manifests", - Long: "`garbage-collect` deletes layers not referenced by any manifests", - Run: func(cmd *cobra.Command, args []string) { - config, err := resolveConfiguration(args) - if err != nil { - fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) - cmd.Usage() - os.Exit(1) - } - - driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters()) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err) - os.Exit(1) - } - - ctx := context.Background() - ctx, err = configureLogging(ctx, config) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) - os.Exit(1) - } - - k, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } - - registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) - os.Exit(1) - } - - err = markAndSweep(ctx, driver, registry) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) - os.Exit(1) - } - }, -} diff --git a/docs/garbagecollect_test.go b/docs/storage/garbagecollect_test.go similarity index 96% rename from docs/garbagecollect_test.go rename to docs/storage/garbagecollect_test.go index dd5fadd5..ff4a3df2 100644 --- a/docs/garbagecollect_test.go +++ b/docs/storage/garbagecollect_test.go @@ -1,4 +1,4 @@ -package registry +package storage import ( "io" @@ -8,7 +8,6 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -22,7 +21,7 @@ type image struct { func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace { ctx := context.Background() - registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete) + registry, err := NewRegistry(ctx, driver, EnableDelete) if err != nil { t.Fatalf("Failed to construct namespace") } @@ -161,7 +160,7 @@ func TestNoDeletionNoEffect(t *testing.T) { } // Run GC - err = markAndSweep(context.Background(), inmemoryDriver, registry) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } @@ -193,7 +192,7 @@ func TestDeletionHasEffect(t *testing.T) { manifests.Delete(ctx, image3.manifestDigest) // Run GC - err = markAndSweep(context.Background(), inmemoryDriver, registry) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } @@ -327,7 +326,7 @@ func TestOrphanBlobDeleted(t *testing.T) { uploadRandomSchema2Image(t, repo) // Run GC - err = markAndSweep(context.Background(), inmemoryDriver, registry) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } From 63d28d3b81dda6fd95adf1244a36afe80dc32434 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 27 Apr 2016 13:24:22 -0700 Subject: [PATCH 474/501] Add a test with a missing _manifests directory Signed-off-by: Richard Scothern --- docs/storage/garbagecollect_test.go | 32 
+++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/storage/garbagecollect_test.go b/docs/storage/garbagecollect_test.go index ff4a3df2..a0ba154b 100644 --- a/docs/storage/garbagecollect_test.go +++ b/docs/storage/garbagecollect_test.go @@ -2,6 +2,7 @@ package storage import ( "io" + "path" "testing" "github.com/docker/distribution" @@ -176,6 +177,37 @@ func TestNoDeletionNoEffect(t *testing.T) { } } +func TestGCWithMissingManifests(t *testing.T) { + ctx := context.Background() + d := inmemory.New() + + registry := createRegistry(t, d) + repo := makeRepository(t, registry, "testrepo") + uploadRandomSchema1Image(t, repo) + + // Simulate a missing _manifests directory + revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"}) + if err != nil { + t.Fatal(err) + } + + _manifestsPath := path.Dir(revPath) + err = d.Delete(ctx, _manifestsPath) + if err != nil { + t.Fatal(err) + } + + err = MarkAndSweep(context.Background(), d, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + if len(blobs) > 0 { + t.Errorf("unexpected blobs after gc") + } +} + func TestDeletionHasEffect(t *testing.T) { ctx := context.Background() inmemoryDriver := inmemory.New() From 2a2577d7b1816956d6904c65b3869cec77002d0d Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Tue, 19 Apr 2016 16:31:25 -0700 Subject: [PATCH 475/501] When a blob upload is committed prevent writing out hashstate in the subsequent close. When a blob upload is cancelled close the blobwriter before removing upload state to ensure old hashstates don't persist. Signed-off-by: Richard Scothern --- docs/storage/blob_test.go | 17 +++++++++++++++++ docs/storage/blobwriter.go | 15 ++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 3698a415..7e1a7cd4 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" + "path" ) // TestWriteSeek tests that the current file size can be @@ -83,6 +84,15 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected error during upload cancellation: %v", err) } + // get the enclosing directory + uploadPath := path.Dir(blobUpload.(*blobWriter).path) + + // ensure state was cleaned up + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after cleanup") + } + // Do a resume, get unknown upload blobUpload, err = bs.Resume(ctx, blobUpload.ID()) if err != distribution.ErrBlobUploadUnknown { @@ -128,6 +138,13 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected error finishing layer upload: %v", err) } + // ensure state was cleaned up + uploadPath = path.Dir(blobUpload.(*blobWriter).path) + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after commit") + } + // After finishing an upload, it should no longer exist. 
if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { t.Fatalf("expected layer upload to be unknown, got %v", err) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 7f280d36..2ae944a4 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -18,8 +18,8 @@ var ( errResumableDigestNotAvailable = errors.New("resumable digest not available") ) -// layerWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. +// blobWriter is used to control the various aspects of resumable +// blob upload. type blobWriter struct { ctx context.Context blobStore *linkedBlobStore @@ -34,6 +34,7 @@ type blobWriter struct { path string resumableDigestEnabled bool + committed bool } var _ distribution.BlobWriter = &blobWriter{} @@ -78,6 +79,7 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) return distribution.Descriptor{}, err } + bw.committed = true return canonical, nil } @@ -89,11 +91,14 @@ func (bw *blobWriter) Cancel(ctx context.Context) error { return err } + if err := bw.Close(); err != nil { + context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + } + if err := bw.removeResources(ctx); err != nil { return err } - bw.Close() return nil } @@ -130,6 +135,10 @@ func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { } func (bw *blobWriter) Close() error { + if bw.committed { + return errors.New("blobwriter close after commit") + } + if err := bw.storeHashState(bw.blobStore.ctx); err != nil { return err } From 28be207bc06249b6cbfa073bc9276eeb92566dbc Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Fri, 29 Apr 2016 23:34:24 +0200 Subject: [PATCH 476/501] Pass through known errors Signed-off-by: Troels Thomsen --- docs/handlers/app.go | 2 ++ docs/handlers/blobupload.go | 2 ++ docs/handlers/images.go | 2 ++ docs/handlers/tags.go | 2 ++ 4 files changed, 8 insertions(+) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 3c3e50d0..fc3f9069 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -634,6 +634,8 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) case distribution.ErrRepositoryNameInvalid: context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) + case errcode.Error: + context.Errors = append(context.Errors, err) } if err := errcode.ServeJSON(w, context.Errors); err != nil { diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 673e2c59..2cd5115d 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -239,6 +239,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht switch err := err.(type) { case distribution.ErrBlobInvalidDigest: buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + case errcode.Error: + buh.Errors = append(buh.Errors, err) default: switch err { case distribution.ErrAccessDenied: diff --git a/docs/handlers/images.go b/docs/handlers/images.go index 5f2d8855..dd2ed2c8 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -283,6 +283,8 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http } } } + case errcode.Error: + imh.Errors = append(imh.Errors, err) default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } diff --git a/docs/handlers/tags.go b/docs/handlers/tags.go index 
fd661e66..91f1031e 100644 --- a/docs/handlers/tags.go +++ b/docs/handlers/tags.go @@ -41,6 +41,8 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { switch err := err.(type) { case distribution.ErrRepositoryUnknown: th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) + case errcode.Error: + th.Errors = append(th.Errors, err) default: th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } From cbae4dd7bf2e4d23557893fa8123cdb52fe87b41 Mon Sep 17 00:00:00 2001 From: Tony Holdstock-Brown Date: Tue, 26 Apr 2016 14:36:38 -0700 Subject: [PATCH 477/501] Implement regulator in filesystem driver This commit refactors base.regulator into the 2.4 interfaces and adds a filesystem configuration option `maxthreads` to configure the regulator. By default `maxthreads` is set to 100. This means the FS driver is limited to 100 concurrent blocking file operations. Any subsequent operations will block in Go until previous filesystem operations complete. This ensures that the registry can never open thousands of simultaneous threads from os filesystem operations. Note that `maxthreads` can never be less than 25. Add test case covering parsable string maxthreads Signed-off-by: Tony Holdstock-Brown --- docs/proxy/proxyblobstore_test.go | 18 +++- docs/storage/driver/base/regulator.go | 43 ++++----- docs/storage/driver/filesystem/driver.go | 81 ++++++++++++++--- docs/storage/driver/filesystem/driver_test.go | 89 ++++++++++++++++++- 4 files changed, 193 insertions(+), 38 deletions(-) diff --git a/docs/proxy/proxyblobstore_test.go b/docs/proxy/proxyblobstore_test.go index b93b5343..967dcd3d 100644 --- a/docs/proxy/proxyblobstore_test.go +++ b/docs/proxy/proxyblobstore_test.go @@ -132,8 +132,15 @@ func makeTestEnv(t *testing.T, name string) *testEnv { t.Fatalf("unable to create tempdir: %s", err) } + localDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": truthDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + // todo: create a tempfile area here - localRegistry, err := storage.NewRegistry(ctx, filesystem.New(truthDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -142,7 +149,14 @@ func makeTestEnv(t *testing.T, name string) *testEnv { t.Fatalf("unexpected error getting repo: %v", err) } - truthRegistry, err := storage.NewRegistry(ctx, filesystem.New(cacheDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + cacheDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": cacheDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } diff --git a/docs/storage/driver/base/regulator.go b/docs/storage/driver/base/regulator.go index 21ddfe57..185160a4 100644 --- a/docs/storage/driver/base/regulator.go +++ b/docs/storage/driver/base/regulator.go @@ -10,46 +10,41 @@ import ( 
type regulator struct { storagedriver.StorageDriver - sync.Cond + *sync.Cond - available uint + available uint64 } // NewRegulator wraps the given driver and is used to regulate concurrent calls // to the given storage driver to a maximum of the given limit. This is useful // for storage drivers that would otherwise create an unbounded number of OS // threads if allowed to be called unregulated. -func NewRegulator(driver storagedriver.StorageDriver, limit uint) storagedriver.StorageDriver { +func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { return ®ulator{ StorageDriver: driver, - Cond: sync.Cond{ - L: &sync.Mutex{}, - }, - available: limit, + Cond: sync.NewCond(&sync.Mutex{}), + available: limit, } } -func (r *regulator) condition() bool { - return r.available > 0 -} - func (r *regulator) enter() { r.L.Lock() - defer r.L.Unlock() - - for !r.condition() { + for r.available == 0 { r.Wait() } - r.available-- + r.L.Unlock() } func (r *regulator) exit() { r.L.Lock() - defer r.Signal() - defer r.L.Unlock() - + // We only need to signal to a waiting FS operation if we're already at the + // limit of threads used + if r.available == 0 { + r.Signal() + } r.available++ + r.L.Unlock() } // Name returns the human-readable "name" of the driver, useful in error @@ -80,25 +75,25 @@ func (r *regulator) PutContent(ctx context.Context, path string, content []byte) return r.StorageDriver.PutContent(ctx, path, content) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" +// Reader retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. -func (r *regulator) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { r.enter() defer r.exit() - return r.StorageDriver.ReadStream(ctx, path, offset) + return r.StorageDriver.Reader(ctx, path, offset) } -// WriteStream stores the contents of the provided io.ReadCloser at a +// Writer stores the contents of the provided io.ReadCloser at a // location designated by the given path. // May be used to resume writing a stream by providing a nonzero offset. // The offset must be no larger than the CurrentSize for this path. 
-func (r *regulator) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { +func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { r.enter() defer r.exit() - return r.StorageDriver.WriteStream(ctx, path, offset, reader) + return r.StorageDriver.Writer(ctx, path, append) } // Stat retrieves the FileInfo for the given path, including the current diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go index e22e9809..1a897261 100644 --- a/docs/storage/driver/filesystem/driver.go +++ b/docs/storage/driver/filesystem/driver.go @@ -8,6 +8,8 @@ import ( "io/ioutil" "os" "path" + "reflect" + "strconv" "time" "github.com/docker/distribution/context" @@ -16,8 +18,23 @@ import ( "github.com/docker/distribution/registry/storage/driver/factory" ) -const driverName = "filesystem" -const defaultRootDirectory = "/var/lib/registry" +const ( + driverName = "filesystem" + defaultRootDirectory = "/var/lib/registry" + defaultMaxThreads = uint64(100) + + // minThreads is the minimum value for the maxthreads configuration + // parameter. If the driver's parameters are less than this we set + // the parameters to minThreads + minThreads = uint64(25) +) + +// DriverParameters represents all configuration options available for the +// filesystem driver +type DriverParameters struct { + RootDirectory string + MaxThreads uint64 +} func init() { factory.Register(driverName, &filesystemDriverFactory{}) @@ -27,7 +44,7 @@ func init() { type filesystemDriverFactory struct{} func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters), nil + return FromParameters(parameters) } type driver struct { @@ -47,25 +64,67 @@ type Driver struct { // FromParameters constructs a new Driver with a given parameters map // Optional Parameters: // - rootdirectory -func FromParameters(parameters map[string]interface{}) *Driver { - var rootDirectory = defaultRootDirectory +// - maxthreads +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params, err := fromParametersImpl(parameters) + if err != nil || params == nil { + return nil, err + } + return New(*params), nil +} + +func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { + var ( + err error + maxThreads = defaultMaxThreads + rootDirectory = defaultRootDirectory + ) + if parameters != nil { - rootDir, ok := parameters["rootdirectory"] - if ok { + if rootDir, ok := parameters["rootdirectory"]; ok { rootDirectory = fmt.Sprint(rootDir) } + + // Get maximum number of threads for blocking filesystem operations, + // if specified + threads := parameters["maxthreads"] + switch v := threads.(type) { + case string: + if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { + return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) + } + case uint64: + maxThreads = v + case int, int32, int64: + maxThreads = uint64(reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int()) + case uint, uint32: + maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads) + } + + if maxThreads < minThreads { + maxThreads = minThreads + } } - return New(rootDirectory) + + params := &DriverParameters{ + RootDirectory: rootDirectory, + MaxThreads: maxThreads, + } + 
return params, nil
+}
 
 // New constructs a new Driver with a given rootDirectory
-func New(rootDirectory string) *Driver {
-	fsDriver := &driver{rootDirectory: rootDirectory}
+func New(params DriverParameters) *Driver {
+	fsDriver := &driver{rootDirectory: params.RootDirectory}
 
 	return &Driver{
 		baseEmbed: baseEmbed{
 			Base: base.Base{
-				StorageDriver: base.NewRegulator(fsDriver, 100),
+				StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads),
 			},
 		},
 	}
diff --git a/docs/storage/driver/filesystem/driver_test.go b/docs/storage/driver/filesystem/driver_test.go
index 8b48b431..3be85923 100644
--- a/docs/storage/driver/filesystem/driver_test.go
+++ b/docs/storage/driver/filesystem/driver_test.go
@@ -3,6 +3,7 @@ package filesystem
 import (
 	"io/ioutil"
 	"os"
+	"reflect"
 	"testing"
 
 	storagedriver "github.com/docker/distribution/registry/storage/driver"
@@ -20,7 +21,93 @@ func init() {
 	}
 	defer os.Remove(root)
 
+	driver, err := FromParameters(map[string]interface{}{
+		"rootdirectory": root,
+	})
+	if err != nil {
+		panic(err)
+	}
+
 	testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
-		return New(root), nil
+		return driver, nil
 	}, testsuites.NeverSkip)
 }
+
+func TestFromParametersImpl(t *testing.T) {
+
+	tests := []struct {
+		params   map[string]interface{} // technically the yaml can contain anything
+		expected DriverParameters
+		pass     bool
+	}{
+		// check we use default threads and root dirs
+		{
+			params: map[string]interface{}{},
+			expected: DriverParameters{
+				RootDirectory: defaultRootDirectory,
+				MaxThreads:    defaultMaxThreads,
+			},
+			pass: true,
+		},
+		// Testing initiation with a string maxThreads which can't be parsed
+		{
+			params: map[string]interface{}{
+				"maxthreads": "fail",
+			},
+			expected: DriverParameters{},
+			pass:     false,
+		},
+		{
+			params: map[string]interface{}{
+				"maxthreads": "100",
+			},
+			expected: DriverParameters{
+				RootDirectory: defaultRootDirectory,
+				MaxThreads:    uint64(100),
+			},
+			pass: true,
+		},
+		{
+			params: map[string]interface{}{
+				"maxthreads": 100,
+			},
+			expected: DriverParameters{
+				RootDirectory: defaultRootDirectory,
+				MaxThreads:    uint64(100),
+			},
+			pass: true,
+		},
+		// check that we use minimum thread counts
+		{
+			params: map[string]interface{}{
+				"maxthreads": 1,
+			},
+			expected: DriverParameters{
+				RootDirectory: defaultRootDirectory,
+				MaxThreads:    minThreads,
+			},
+			pass: true,
+		},
+	}
+
+	for _, item := range tests {
+		params, err := fromParametersImpl(item.params)
+
+		if !item.pass {
+			// We only need to assert that expected failures have an error
+			if err == nil {
+				t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params)
+			}
+			continue
+		}
+
+		if err != nil {
+			t.Fatalf("unexpected error creating filesystem driver: %s", err)
+		}
+		// Note that we get a pointer to params back
+		if !reflect.DeepEqual(*params, item.expected) {
+			t.Fatalf("unexpected params from filesystem driver.
expected %+v, got %+v", item.expected, params) + } + } + +} From 8762c800f1af28a609e7d76ba5bff960a5d02e95 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Tue, 3 May 2016 10:28:40 +0200 Subject: [PATCH 478/501] registry: type too many requests error Signed-off-by: Antonio Murdaca --- docs/api/errcode/register.go | 10 ++++++++++ docs/client/errors.go | 8 ++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go index 01c34384..71cf6f7a 100644 --- a/docs/api/errcode/register.go +++ b/docs/api/errcode/register.go @@ -63,6 +63,16 @@ var ( Description: "Returned when a service is not available", HTTPStatusCode: http.StatusServiceUnavailable, }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. + ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) ) var nextCode = 1000 diff --git a/docs/client/errors.go b/docs/client/errors.go index 00fafe11..804e69e0 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -51,10 +51,14 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { } err = json.Unmarshal(body, &detailsErr) if err == nil && detailsErr.Details != "" { - if statusCode == http.StatusUnauthorized { + switch statusCode { + case http.StatusUnauthorized: return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + case http.StatusTooManyRequests: + return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) + default: + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) } - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) } if err := json.Unmarshal(body, &errors); err != nil { From db274d3c00dfbf231154275432bd906672fd749a Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Tue, 3 May 2016 21:24:43 +0200 Subject: [PATCH 479/501] registry: do not use http.StatusTooManyRequests go1.5 doesn't export http.StatusTooManyRequests while go1.6 does. Fix this by hardcoding the status code for now. Signed-off-by: Antonio Murdaca --- docs/api/errcode/register.go | 5 ++++- docs/client/errors.go | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go index 71cf6f7a..7489e84f 100644 --- a/docs/api/errcode/register.go +++ b/docs/api/errcode/register.go @@ -71,7 +71,10 @@ var ( Message: "too many requests", Description: `Returned when a client attempts to contact a service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, + // FIXME: go1.5 doesn't export http.StatusTooManyRequests while + // go1.6 does. Update the hardcoded value to the constant once + // Docker updates golang version to 1.6. + HTTPStatusCode: 429, }) ) diff --git a/docs/client/errors.go b/docs/client/errors.go index 804e69e0..adbaacf4 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -54,7 +54,10 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { switch statusCode { case http.StatusUnauthorized: return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: + // FIXME: go1.5 doesn't export http.StatusTooManyRequests while + // go1.6 does. Update the hardcoded value to the constant once + // Docker updates golang version to 1.6. 
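+	// (HTTP 429 "Too Many Requests" is defined by RFC 6585.)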
+	case 429:
 		return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
 	default:
 		return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
From 1e05d81a71700ca9b14a84c4f55185520c72c029 Mon Sep 17 00:00:00 2001
From: Tony Holdstock-Brown
Date: Tue, 3 May 2016 16:03:22 -0700
Subject: [PATCH 480/501] Don't wrap thread limits when using a negative int

Signed-off-by: Tony Holdstock-Brown
---
 docs/storage/driver/filesystem/driver.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/docs/storage/driver/filesystem/driver.go b/docs/storage/driver/filesystem/driver.go
index 1a897261..649e2bc2 100644
--- a/docs/storage/driver/filesystem/driver.go
+++ b/docs/storage/driver/filesystem/driver.go
@@ -96,7 +96,12 @@ func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, e
 	case uint64:
 		maxThreads = v
 	case int, int32, int64:
-		maxThreads = uint64(reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int())
+		val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int()
+		// If threads is negative, casting to uint64 will wrap around and
+		// give you the hugest thread limit ever. Let's be sensible here
+		if val > 0 {
+			maxThreads = uint64(val)
+		}
 	case uint, uint32:
 		maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint()
 	case nil:
From ddec5464667eb1b364d1713e1eaf85b3c216cc63 Mon Sep 17 00:00:00 2001
From: Arthur Baars
Date: Fri, 6 May 2016 10:46:37 +0100
Subject: [PATCH 481/501] StorageDriver: Test case for #1698

Signed-off-by: Arthur Baars
---
 docs/handlers/api_test.go                    | 18 ++---
 docs/handlers/app_test.go                    |  6 +-
 docs/storage/blob_test.go                    | 16 +++--
 docs/storage/driver/testdriver/testdriver.go | 71 ++++++++++++++++++++
 4 files changed, 92 insertions(+), 19 deletions(-)
 create mode 100644 docs/storage/driver/testdriver/testdriver.go

diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go
index 523ecca2..8f4bff0e 100644
--- a/docs/handlers/api_test.go
+++ b/docs/handlers/api_test.go
@@ -29,7 +29,7 @@ import (
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/api/v2"
-	_ "github.com/docker/distribution/registry/storage/driver/inmemory"
+	_ "github.com/docker/distribution/registry/storage/driver/testdriver"
 	"github.com/docker/distribution/testutil"
 	"github.com/docker/libtrust"
 	"github.com/gorilla/handlers"
@@ -219,7 +219,7 @@ func contains(elems []string, e string) bool {
 func TestURLPrefix(t *testing.T) {
 	config := configuration.Configuration{
 		Storage: configuration.Storage{
-			"inmemory": configuration.Parameters{},
+			"testdriver": configuration.Parameters{},
 		},
 	}
 	config.HTTP.Prefix = "/test/"
@@ -296,7 +296,7 @@ func TestBlobDelete(t *testing.T) {
 func TestRelativeURL(t *testing.T) {
 	config := configuration.Configuration{
 		Storage: configuration.Storage{
-			"inmemory": configuration.Parameters{},
+			"testdriver": configuration.Parameters{},
 		},
 	}
 	config.HTTP.Headers = headerConfig
@@ -1884,8 +1884,8 @@ type testEnv struct {
 func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
 	config := configuration.Configuration{
 		Storage: configuration.Storage{
-			"inmemory": configuration.Parameters{},
-			"delete":   configuration.Parameters{"enabled": deleteEnabled},
+			"testdriver": configuration.Parameters{},
+			"delete":     configuration.Parameters{"enabled": deleteEnabled},
 		},
 		Proxy: configuration.Proxy{
 			RemoteURL: "http://example.com",
@@ -1899,8 +1899,8 @@ func newTestEnv(t
*testing.T, deleteEnabled bool) *testEnv { config := configuration.Configuration{ Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - "delete": configuration.Parameters{"enabled": deleteEnabled}, + "testdriver": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, }, } @@ -2413,7 +2413,7 @@ func TestCheckContextNotifier(t *testing.T) { func TestProxyManifestGetByTag(t *testing.T) { truthConfig := configuration.Configuration{ Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, + "testdriver": configuration.Parameters{}, }, } truthConfig.HTTP.Headers = headerConfig @@ -2427,7 +2427,7 @@ func TestProxyManifestGetByTag(t *testing.T) { proxyConfig := configuration.Configuration{ Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, + "testdriver": configuration.Parameters{}, }, Proxy: configuration.Proxy{ RemoteURL: truthEnv.server.URL, diff --git a/docs/handlers/app_test.go b/docs/handlers/app_test.go index caa7ab97..3a8e4e1e 100644 --- a/docs/handlers/app_test.go +++ b/docs/handlers/app_test.go @@ -16,7 +16,7 @@ import ( _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" memorycache "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/registry/storage/driver/testdriver" ) // TestAppDispatcher builds an application with a test dispatcher and ensures @@ -24,7 +24,7 @@ import ( // This only tests the dispatch mechanism. The underlying dispatchers must be // tested individually. func TestAppDispatcher(t *testing.T) { - driver := inmemory.New() + driver := testdriver.New() ctx := context.Background() registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) if err != nil { @@ -142,7 +142,7 @@ func TestNewApp(t *testing.T) { ctx := context.Background() config := configuration.Configuration{ Storage: configuration.Storage{ - "inmemory": nil, + "testdriver": nil, }, Auth: configuration.Auth{ // For now, we simply test that new auth results in a viable diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 7e1a7cd4..3cec3bff 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -14,7 +14,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/registry/storage/driver/testdriver" "github.com/docker/distribution/testutil" "path" ) @@ -24,7 +24,7 @@ import ( func TestWriteSeek(t *testing.T) { ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() + driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) @@ -42,6 +42,7 @@ func TestWriteSeek(t *testing.T) { } contents := []byte{1, 2, 3} blobUpload.Write(contents) + blobUpload.Close() offset := blobUpload.Size() if offset != int64(len(contents)) { t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) @@ -59,7 +60,7 @@ func TestSimpleBlobUpload(t *testing.T) { ctx := 
context.Background() imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() + driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) @@ -120,11 +121,12 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("layer data write incomplete") } + blobUpload.Close() + offset := blobUpload.Size() if offset != nn { t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) } - blobUpload.Close() // Do a resume, for good fun blobUpload, err = bs.Resume(ctx, blobUpload.ID()) @@ -253,7 +255,7 @@ func TestSimpleBlobUpload(t *testing.T) { func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() + driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) @@ -365,7 +367,7 @@ func TestBlobMount(t *testing.T) { ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") sourceImageName, _ := reference.ParseNamed("foo/source") - driver := inmemory.New() + driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) @@ -516,7 +518,7 @@ func TestBlobMount(t *testing.T) { func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") - driver := inmemory.New() + driver := testdriver.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) diff --git a/docs/storage/driver/testdriver/testdriver.go b/docs/storage/driver/testdriver/testdriver.go new file mode 100644 index 00000000..988e5d33 --- /dev/null +++ b/docs/storage/driver/testdriver/testdriver.go @@ -0,0 +1,71 @@ +package testdriver + +import ( + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +const driverName = "testdriver" + +func init() { + factory.Register(driverName, &testDriverFactory{}) +} + +// testDriverFactory implements the factory.StorageDriverFactory interface. +type testDriverFactory struct{} + +func (factory *testDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return New(), nil +} + +// TestDriver is a StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +type TestDriver struct { + storagedriver.StorageDriver +} + +type testFileWriter struct { + storagedriver.FileWriter + prevchunk []byte +} + +var _ storagedriver.StorageDriver = &TestDriver{} + +// New constructs a new StorageDriver for testing purposes. The Writer returned by this driver +// simulates the case where Write operations are buffered. 
This causes the value returned by Size to lag +// behind until Close (or Commit, or Cancel) is called. +func New() *TestDriver { + return &TestDriver{StorageDriver: inmemory.New()} +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (td *TestDriver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + fw, err := td.StorageDriver.Writer(ctx, path, append) + return &testFileWriter{FileWriter: fw}, err +} + +func (tfw *testFileWriter) Write(p []byte) (int, error) { + _, err := tfw.FileWriter.Write(tfw.prevchunk) + tfw.prevchunk = make([]byte, len(p)) + copy(tfw.prevchunk, p) + return len(p), err +} + +func (tfw *testFileWriter) Close() error { + tfw.Write(nil) + return tfw.FileWriter.Close() +} + +func (tfw *testFileWriter) Cancel() error { + tfw.Write(nil) + return tfw.FileWriter.Cancel() +} + +func (tfw *testFileWriter) Commit() error { + tfw.Write(nil) + return tfw.FileWriter.Commit() +} From af00617b993a42614cd5793e2b186f390c6f7893 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Thu, 5 May 2016 15:49:14 +0100 Subject: [PATCH 482/501] Blobwriter: call BlobWriter.Size after BlobWriter.Close Signed-off-by: Arthur Baars --- docs/handlers/blobupload.go | 5 +---- docs/storage/blobwriter.go | 1 + 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index 673e2c59..b403a167 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -134,7 +134,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req } buh.Upload = upload - defer buh.Upload.Close() if err := buh.blobUploadResponse(w, r, true); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) @@ -224,11 +223,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - size := buh.Upload.Size() - desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ Digest: dgst, - Size: size, // TODO(stevvooe): This isn't wildly important yet, but we should // really set the mediatype. For now, we can let the backend take care @@ -293,6 +289,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. // TODO(stevvooe): Need a better way to manage the upload state automatically. 
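A minimal sketch of the buffered behavior the new testdriver introduces, and of why the hunks in this series now call Close before Size. Package paths are the ones added above; the `/upload/data` key is an arbitrary example:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/testdriver"
)

func main() {
	ctx := context.Background()
	td := testdriver.New()

	fw, err := td.Writer(ctx, "/upload/data", false)
	if err != nil {
		panic(err)
	}

	// The test driver holds back the most recent chunk, so Size lags
	// one Write behind what the caller has handed in.
	fw.Write([]byte("hello"))
	fmt.Println(fw.Size()) // 0: "hello" is still sitting in the writer's buffer

	fw.Close()             // flushes the buffered chunk to the inmemory driver
	fmt.Println(fw.Size()) // 5: Size is only reliable after Close/Commit/Cancel
}
```

This is the exact ordering the updated blob tests encode (`Write`, then `Close`, then `Size`), and the reason `blobUploadResponse` now closes the upload before reading its offset.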
buh.State.Name = buh.Repository.Named().Name() buh.State.UUID = buh.Upload.ID() + buh.Upload.Close() buh.State.Offset = buh.Upload.Size() buh.State.StartedAt = buh.Upload.StartedAt() diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 3387bafb..48ac8a75 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -58,6 +58,7 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) } bw.Close() + desc.Size = bw.Size() canonical, err := bw.validateBlob(ctx, desc) if err != nil { From e57fd4faa67a431518e21079c0190adffb11dea3 Mon Sep 17 00:00:00 2001 From: Arthur Baars Date: Thu, 5 May 2016 17:16:48 +0100 Subject: [PATCH 483/501] StorageDriver: GCS: allow Cancel on a closed FileWriter Signed-off-by: Arthur Baars --- docs/storage/driver/gcs/gcs.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/storage/driver/gcs/gcs.go b/docs/storage/driver/gcs/gcs.go index abe0b9f6..1369c280 100644 --- a/docs/storage/driver/gcs/gcs.go +++ b/docs/storage/driver/gcs/gcs.go @@ -321,12 +321,8 @@ type writer struct { // Cancel removes any written content from this FileWriter. func (w *writer) Cancel() error { - err := w.checkClosed() - if err != nil { - return err - } w.closed = true - err = storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + err := storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) if err != nil { if status, ok := err.(*googleapi.Error); ok { if status.Code == http.StatusNotFound { From bb841197c2ba90394b3c00d08ec9cb5ee1e7024e Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 9 May 2016 16:38:16 +0100 Subject: [PATCH 484/501] Add 'us-gov-west-1' to the valid region list. Signed-off-by: Richard Scothern --- docs/storage/driver/s3-aws/s3.go | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index 565f264d..902abeb4 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -82,6 +82,7 @@ func init() { "ap-northeast-2", "sa-east-1", "cn-north-1", + "us-gov-west-1", } { validRegions[region] = struct{}{} } From 50e6eef0761ecf06648e8ab74d5c9fc7aacc84dd Mon Sep 17 00:00:00 2001 From: Alexey Gladkov Date: Wed, 18 May 2016 18:54:27 +0200 Subject: [PATCH 485/501] Add support for blobAccessController middleware Signed-off-by: Michal Minar Signed-off-by: Alexey Gladkov --- docs/handlers/app.go | 2 +- docs/middleware/registry/middleware.go | 14 +++++++ docs/storage/registry.go | 54 ++++++++++++++++++-------- 3 files changed, 52 insertions(+), 18 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 3c3e50d0..c65441c6 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -177,7 +177,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.httpHost = *u } - options := []storage.RegistryOption{} + options := registrymiddleware.GetRegistryOptions() if app.isCache { options = append(options, storage.DisableDigestResumption) diff --git a/docs/middleware/registry/middleware.go b/docs/middleware/registry/middleware.go index 7535c6db..3e6e5cc7 100644 --- a/docs/middleware/registry/middleware.go +++ b/docs/middleware/registry/middleware.go @@ -5,6 +5,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" ) // InitFunc is the type of a RegistryMiddleware factory function and is @@ -12,6 +13,7 @@ import ( type InitFunc 
func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) var middlewares map[string]InitFunc +var registryoptions []storage.RegistryOption // Register is used to register an InitFunc for // a RegistryMiddleware backend with the given name. @@ -38,3 +40,15 @@ func Get(ctx context.Context, name string, options map[string]interface{}, regis return nil, fmt.Errorf("no registry middleware registered with name: %s", name) } + +// RegisterOptions adds more options to RegistryOption list. Options get applied before +// any other configuration-based options. +func RegisterOptions(options ...storage.RegistryOption) error { + registryoptions = append(registryoptions, options...) + return nil +} + +// GetRegistryOptions returns list of RegistryOption. +func GetRegistryOptions() []storage.RegistryOption { + return registryoptions +} diff --git a/docs/storage/registry.go b/docs/storage/registry.go index a1128b4a..3fe4ac68 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -12,14 +12,15 @@ import ( // registry is the top-level implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { - blobStore *blobStore - blobServer *blobServer - statter *blobStatter // global statter service. - blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider - deleteEnabled bool - resumableDigestEnabled bool - schema1SignaturesEnabled bool - schema1SigningKey libtrust.PrivateKey + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool + schema1SignaturesEnabled bool + schema1SigningKey libtrust.PrivateKey + blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory } // RegistryOption is the type used for functional options for NewRegistry. @@ -64,6 +65,15 @@ func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { } } +// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the +// factory to create BlobDescriptorServiceFactory middleware. +func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { + return func(registry *registry) error { + registry.blobDescriptorServiceFactory = factory + return nil + } +} + // BlobDescriptorCacheProvider returns a functional option for // NewRegistry. It creates a cached blob statter for use by the // registry. @@ -190,16 +200,22 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + blobStore := &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - }, + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: statter, // TODO(stevvooe): linkPath limits this blob store to only // manifests. 
This instance cannot be used for blob checks. @@ -258,6 +274,10 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) } + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + return &linkedBlobStore{ registry: repo.registry, blobStore: repo.blobStore, From dd66aabebafd0cf20f26d92a71e1a991d9309a39 Mon Sep 17 00:00:00 2001 From: John Starks Date: Sat, 14 May 2016 14:49:08 -0700 Subject: [PATCH 486/501] Add support for layers from foreign sources This will be used to support downloading Windows base layers from Microsoft URLs. Signed-off-by: John Starks --- docs/proxy/proxytagservice_test.go | 7 +- docs/storage/blob_test.go | 11 +- docs/storage/cache/cachecheck/suite.go | 15 +-- docs/storage/schema2manifesthandler.go | 31 +++++- docs/storage/schema2manifesthandler_test.go | 117 ++++++++++++++++++++ 5 files changed, 165 insertions(+), 16 deletions(-) create mode 100644 docs/storage/schema2manifesthandler_test.go diff --git a/docs/proxy/proxytagservice_test.go b/docs/proxy/proxytagservice_test.go index a446645c..ce0fe78b 100644 --- a/docs/proxy/proxytagservice_test.go +++ b/docs/proxy/proxytagservice_test.go @@ -1,6 +1,7 @@ package proxy import ( + "reflect" "sort" "sync" "testing" @@ -92,7 +93,7 @@ func TestGet(t *testing.T) { t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger) } - if d != remoteDesc { + if !reflect.DeepEqual(d, remoteDesc) { t.Fatal("unable to get put tag") } @@ -101,7 +102,7 @@ func TestGet(t *testing.T) { t.Fatal("remote tag not pulled into store") } - if local != remoteDesc { + if !reflect.DeepEqual(local, remoteDesc) { t.Fatalf("unexpected descriptor pulled through") } @@ -121,7 +122,7 @@ func TestGet(t *testing.T) { t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger) } - if d != newRemoteDesc { + if !reflect.DeepEqual(d, newRemoteDesc) { t.Fatal("unable to get put tag") } diff --git a/docs/storage/blob_test.go b/docs/storage/blob_test.go index 7e1a7cd4..f7ae70f1 100644 --- a/docs/storage/blob_test.go +++ b/docs/storage/blob_test.go @@ -7,6 +7,8 @@ import ( "io" "io/ioutil" "os" + "path" + "reflect" "testing" "github.com/docker/distribution" @@ -16,7 +18,6 @@ import ( "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" - "path" ) // TestWriteSeek tests that the current file size can be @@ -156,7 +157,7 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } - if statDesc != desc { + if !reflect.DeepEqual(statDesc, desc) { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } @@ -410,7 +411,7 @@ func TestBlobMount(t *testing.T) { t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) } - if statDesc != desc { + if !reflect.DeepEqual(statDesc, desc) { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } @@ -436,7 +437,7 @@ func TestBlobMount(t *testing.T) { t.Fatalf("unexpected error mounting layer: %v", err) } - if ebm.Descriptor != desc { + if !reflect.DeepEqual(ebm.Descriptor, desc) { t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) } @@ -446,7 +447,7 @@ func TestBlobMount(t *testing.T) { t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } - if statDesc != desc { + if 
!reflect.DeepEqual(statDesc, desc) { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } diff --git a/docs/storage/cache/cachecheck/suite.go b/docs/storage/cache/cachecheck/suite.go index 13e9c132..cba5addd 100644 --- a/docs/storage/cache/cachecheck/suite.go +++ b/docs/storage/cache/cachecheck/suite.go @@ -1,6 +1,7 @@ package cachecheck import ( + "reflect" "testing" "github.com/docker/distribution" @@ -79,7 +80,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi t.Fatalf("unexpected error statting fake2:abc: %v", err) } - if expected != desc { + if !reflect.DeepEqual(expected, desc) { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } @@ -89,7 +90,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi t.Fatalf("descriptor not returned for canonical key: %v", err) } - if expected != desc { + if !reflect.DeepEqual(expected, desc) { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } @@ -99,7 +100,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) } - if desc != expected { + if !reflect.DeepEqual(desc, expected) { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } @@ -109,7 +110,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi t.Fatalf("unexpected error checking glboal descriptor: %v", err) } - if desc != expected { + if !reflect.DeepEqual(desc, expected) { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } @@ -126,7 +127,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi t.Fatalf("unexpected error getting descriptor: %v", err) } - if desc != expected { + if !reflect.DeepEqual(desc, expected) { t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) } @@ -137,7 +138,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi expected.MediaType = "application/octet-stream" // expect original mediatype in global - if desc != expected { + if !reflect.DeepEqual(desc, expected) { t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) } } @@ -163,7 +164,7 @@ func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider c t.Fatalf("unexpected error statting fake2:abc: %v", err) } - if expected != desc { + if !reflect.DeepEqual(expected, desc) { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } diff --git a/docs/storage/schema2manifesthandler.go b/docs/storage/schema2manifesthandler.go index 115786e2..6456efa4 100644 --- a/docs/storage/schema2manifesthandler.go +++ b/docs/storage/schema2manifesthandler.go @@ -1,15 +1,24 @@ package storage import ( + "errors" "fmt" + "net/url" "encoding/json" + "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema2" ) +var ( + errUnexpectedURL = errors.New("unexpected URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") +) + //schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. 
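The switch from `!=` to `reflect.DeepEqual` in the hunks above follows from the new `URLs` field on `distribution.Descriptor`: a Go struct containing a slice is not comparable, so `==` on descriptor values no longer compiles. A self-contained sketch, with a simplified `descriptor` type standing in for the real one:

```go
package main

import (
	"fmt"
	"reflect"
)

// descriptor mirrors the shape of distribution.Descriptor after this patch:
// the slice-typed URLs field makes values of the struct non-comparable.
type descriptor struct {
	MediaType string
	Size      int64
	Digest    string
	URLs      []string // slice field: == on the struct is now a compile error
}

func main() {
	a := descriptor{MediaType: "application/octet-stream", Size: 3, Digest: "sha256:abc"}
	b := a

	// fmt.Println(a == b) // compile error: struct containing []string cannot be compared

	// reflect.DeepEqual compares field by field, including slice contents.
	fmt.Println(reflect.DeepEqual(a, b)) // true

	b.URLs = []string{"https://foo/bar"}
	fmt.Println(reflect.DeepEqual(a, b)) // false
}
```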
type schema2ManifestHandler struct { repository *repository @@ -80,7 +89,27 @@ func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst sche } for _, fsLayer := range mnfst.References() { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + var err error + if fsLayer.MediaType != schema2.MediaTypeForeignLayer { + if len(fsLayer.URLs) == 0 { + _, err = ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + } else { + err = errUnexpectedURL + } + } else { + // Clients download this layer from an external URL, so do not check for + // its presense. + if len(fsLayer.URLs) == 0 { + err = errMissingURL + } + for _, u := range fsLayer.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" { + err = errInvalidURL + } + } + } if err != nil { if err != distribution.ErrBlobUnknown { errs = append(errs, err) diff --git a/docs/storage/schema2manifesthandler_test.go b/docs/storage/schema2manifesthandler_test.go new file mode 100644 index 00000000..c2f61edf --- /dev/null +++ b/docs/storage/schema2manifesthandler_test.go @@ -0,0 +1,117 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestVerifyManifestForeignLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "test") + manifestService := makeManifestService(t, repo) + + config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil) + if err != nil { + t.Fatal(err) + } + + layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil) + if err != nil { + t.Fatal(err) + } + + foreignLayer := distribution.Descriptor{ + Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeForeignLayer, + } + + template := schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: config, + } + + type testcase struct { + BaseLayer distribution.Descriptor + URLs []string + Err error + } + + cases := []testcase{ + { + foreignLayer, + nil, + errMissingURL, + }, + { + layer, + []string{"http://foo/bar"}, + errUnexpectedURL, + }, + { + foreignLayer, + []string{"file:///local/file"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar#baz"}, + errInvalidURL, + }, + { + foreignLayer, + []string{""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"https://foo/bar", ""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"https://foo/bar"}, + nil, + }, + } + + for _, c := range cases { + m := template + l := c.BaseLayer + l.URLs = c.URLs + m.Layers = []distribution.Descriptor{l} + dm, err := schema2.FromStruct(m) + if err != nil { + t.Error(err) + continue + } + + _, err = manifestService.Put(ctx, dm) + if verr, ok := err.(distribution.ErrManifestVerification); ok { + // Extract the first error + if len(verr) == 2 { + if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { + err = verr[0] + } + } + } + if err != c.Err { + t.Errorf("%#v: expected %v, got %v", l, c.Err, err) + } + } +} From f1b815ed9f983c164b5f90db92ca8063bd84d128 Mon Sep 17 00:00:00 2001 From: Tony 
Holdstock-Brown Date: Tue, 24 May 2016 11:07:55 -0700 Subject: [PATCH 487/501] Pass in `app` as context to apply{N}Middleware This lets us access registry config within middleware for additional configuration of whatever it is that you're overriding. Signed-off-by: Tony Holdstock-Brown --- docs/handlers/app.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 3c3e50d0..bf6727af 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -258,7 +258,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { } } - app.registry, err = applyRegistryMiddleware(app.Context, app.registry, config.Middleware["registry"]) + app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"]) if err != nil { panic(err) } @@ -647,7 +647,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { repository, app.eventBridge(context, r)) - context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) + context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) From d3b61b612f5e14ba0d74872ed6af913d48719a37 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 6 Apr 2016 17:01:30 -0700 Subject: [PATCH 488/501] Remove signature store from registry. Return a generated signature for manifest pull. Signed-off-by: Richard Scothern --- docs/handlers/api_test.go | 4 +- docs/handlers/app.go | 10 +- docs/proxy/proxymanifeststore_test.go | 17 ++-- docs/root.go | 2 +- docs/storage/blobstore.go | 1 - docs/storage/garbagecollect.go | 17 ---- docs/storage/garbagecollect_test.go | 24 ++--- docs/storage/manifeststore.go | 47 --------- docs/storage/manifeststore_test.go | 43 +-------- docs/storage/paths.go | 54 +---------- docs/storage/paths_test.go | 17 +--- docs/storage/registry.go | 22 +---- docs/storage/signaturestore.go | 131 -------------------------- docs/storage/signedmanifesthandler.go | 22 ----- 14 files changed, 33 insertions(+), 378 deletions(-) delete mode 100644 docs/storage/signaturestore.go diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 523ecca2..01fd4f4c 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1067,13 +1067,13 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name t.Fatalf("error decoding fetched manifest: %v", err) } - // check two signatures were roundtripped + // check only 1 signature is returned signatures, err = fetchedManifestByDigest.Signatures() if err != nil { t.Fatal(err) } - if len(signatures) != 2 { + if len(signatures) != 1 { t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) } diff --git a/docs/handlers/app.go b/docs/handlers/app.go index 4bda082b..384a61d6 100644 --- a/docs/handlers/app.go +++ b/docs/handlers/app.go @@ -155,6 +155,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.configureRedis(config) app.configureLogHook(config) + options := registrymiddleware.GetRegistryOptions() if config.Compatibility.Schema1.TrustKey != "" { app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) if err != nil { @@ -169,6 +170,8 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { } } + 
options = append(options, storage.Schema1SigningKey(app.trustKey)) + if config.HTTP.Host != "" { u, err := url.Parse(config.HTTP.Host) if err != nil { @@ -177,17 +180,10 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.httpHost = *u } - options := registrymiddleware.GetRegistryOptions() - if app.isCache { options = append(options, storage.DisableDigestResumption) } - if config.Compatibility.Schema1.DisableSignatureStore { - options = append(options, storage.DisableSchema1Signatures) - options = append(options, storage.Schema1SigningKey(app.trustKey)) - } - // configure deletion if d, ok := config.Storage["delete"]; ok { e, ok := d["enabled"] diff --git a/docs/proxy/proxymanifeststore_test.go b/docs/proxy/proxymanifeststore_test.go index 1069d66c..0d6b7171 100644 --- a/docs/proxy/proxymanifeststore_test.go +++ b/docs/proxy/proxymanifeststore_test.go @@ -60,12 +60,6 @@ func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, return sm.manifests.Put(ctx, manifest) } -/*func (sm statsManifest) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - sm.stats["enumerate"]++ - return sm.manifests.Enumerate(ctx, manifests, last) -} -*/ - type mockChallenger struct { sync.Mutex count int @@ -75,7 +69,6 @@ type mockChallenger struct { func (m *mockChallenger) tryEstablishChallenges(context.Context) error { m.Lock() defer m.Unlock() - m.count++ return nil } @@ -93,9 +86,15 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE if err != nil { t.Fatalf("unable to parse reference: %s", err) } + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } ctx := context.Background() - truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), + storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), + storage.Schema1SigningKey(k)) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -117,7 +116,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE t.Fatalf(err.Error()) } - localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k)) if err != nil { t.Fatalf("error creating registry: %v", err) } diff --git a/docs/root.go b/docs/root.go index 7a7d44cb..5d3005c2 100644 --- a/docs/root.go +++ b/docs/root.go @@ -69,7 +69,7 @@ var GCCmd = &cobra.Command{ os.Exit(1) } - registry, err := storage.NewRegistry(ctx, driver, storage.DisableSchema1Signatures, storage.Schema1SigningKey(k)) + registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k)) if err != nil { fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err) os.Exit(1) diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index 9034cb68..84f6660f 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -75,7 +75,6 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr } // TODO(stevvooe): Write out 
mediatype here, as well. - return distribution.Descriptor{ Size: int64(len(p)), diff --git a/docs/storage/garbagecollect.go b/docs/storage/garbagecollect.go index be64b847..bc340416 100644 --- a/docs/storage/garbagecollect.go +++ b/docs/storage/garbagecollect.go @@ -6,7 +6,6 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" @@ -71,22 +70,6 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis } switch manifest.(type) { - case *schema1.SignedManifest: - signaturesGetter, ok := manifestService.(distribution.SignaturesGetter) - if !ok { - return fmt.Errorf("unable to convert ManifestService into SignaturesGetter") - } - signatures, err := signaturesGetter.GetSignatures(ctx, dgst) - if err != nil { - return fmt.Errorf("failed to get signatures for signed manifest: %v", err) - } - for _, signatureDigest := range signatures { - if dryRun { - emit("%s: marking signature %s", repoName, signatureDigest) - } - markSet[signatureDigest] = struct{}{} - } - break case *schema2.DeserializedManifest: config := manifest.(*schema2.DeserializedManifest).Config if dryRun { diff --git a/docs/storage/garbagecollect_test.go b/docs/storage/garbagecollect_test.go index a0ba154b..86fc175a 100644 --- a/docs/storage/garbagecollect_test.go +++ b/docs/storage/garbagecollect_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" + "github.com/docker/libtrust" ) type image struct { @@ -22,7 +23,11 @@ type image struct { func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace { ctx := context.Background() - registry, err := NewRegistry(ctx, driver, EnableDelete) + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + registry, err := NewRegistry(ctx, driver, EnableDelete, Schema1SigningKey(k)) if err != nil { t.Fatalf("Failed to construct namespace") } @@ -139,13 +144,13 @@ func TestNoDeletionNoEffect(t *testing.T) { ctx := context.Background() inmemoryDriver := inmemory.New() - registry := createRegistry(t, inmemoryDriver) + registry := createRegistry(t, inmemory.New()) repo := makeRepository(t, registry, "palailogos") manifestService, err := repo.Manifests(ctx) image1 := uploadRandomSchema1Image(t, repo) image2 := uploadRandomSchema1Image(t, repo) - image3 := uploadRandomSchema2Image(t, repo) + uploadRandomSchema2Image(t, repo) // construct manifestlist for fun. 
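With the signature store removed, any registry that serves schema1 manifests must carry a signing key so that pulls can return a generated signature. A sketch of the construction pattern the updated tests use; all identifiers appear in the hunks above:

```go
package main

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
	"github.com/docker/libtrust"
)

func main() {
	ctx := context.Background()

	// Every registry instance now needs its own key: schema1 manifests are
	// re-signed on the way out instead of having stored signatures attached.
	k, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	registry, err := storage.NewRegistry(ctx, inmemory.New(),
		storage.EnableDelete,
		storage.Schema1SigningKey(k))
	if err != nil {
		panic(err)
	}
	_ = registry // obtain repositories via registry.Repository(...) from here
}
```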
blobstatter := registry.BlobStatter() @@ -160,20 +165,17 @@ func TestNoDeletionNoEffect(t *testing.T) { t.Fatalf("Failed to add manifest list: %v", err) } + before := allBlobs(t, registry) + // Run GC err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } - blobs := allBlobs(t, registry) - - // the +1 at the end is for the manifestList - // the first +3 at the end for each manifest's blob - // the second +3 at the end for each manifest's signature/config layer - totalBlobCount := len(image1.layers) + len(image2.layers) + len(image3.layers) + 1 + 3 + 3 - if len(blobs) != totalBlobCount { - t.Fatalf("Garbage collection affected storage") + after := allBlobs(t, registry) + if len(before) != len(after) { + t.Fatalf("Garbage collection affected storage: %d != %d", len(before), len(after)) } } diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 5a9165f9..68483c95 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -2,7 +2,6 @@ package storage import ( "fmt" - "path" "encoding/json" "github.com/docker/distribution" @@ -12,7 +11,6 @@ import ( "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/storage/driver" ) // A ManifestHandler gets and puts manifests of a particular type. @@ -141,48 +139,3 @@ func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Dig }) return err } - -// Only valid for schema1 signed manifests -func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) { - // sanity check that digest refers to a schema1 digest - manifest, err := ms.Get(ctx, manifestDigest) - if err != nil { - return nil, err - } - - if _, ok := manifest.(*schema1.SignedManifest); !ok { - return nil, fmt.Errorf("digest %v is not for schema1 manifest", manifestDigest) - } - - signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: ms.repository.Named().Name(), - revision: manifestDigest, - }) - if err != nil { - return nil, err - } - - var digests []digest.Digest - alg := string(digest.SHA256) - signaturePaths, err := ms.blobStore.driver.List(ctx, path.Join(signaturesPath, alg)) - - switch err.(type) { - case nil: - break - case driver.PathNotFoundError: - // Manifest may have been pushed with signature store disabled - return digests, nil - default: - return nil, err - } - - for _, sigPath := range signaturePaths { - sigdigest, err := digest.ParseDigest(alg + ":" + path.Base(sigPath)) - if err != nil { - // merely found not a digest - continue - } - digests = append(digests, sigdigest) - } - return digests, nil -} diff --git a/docs/storage/manifeststore_test.go b/docs/storage/manifeststore_test.go index fcb5adf9..cbd30c04 100644 --- a/docs/storage/manifeststore_test.go +++ b/docs/storage/manifeststore_test.go @@ -52,15 +52,11 @@ func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, opt } func TestManifestStorage(t *testing.T) { - testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) -} - -func TestManifestStorageDisabledSignatures(t *testing.T) { k, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatal(err) } - testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, 
EnableRedirect, DisableSchema1Signatures, Schema1SigningKey(k)) + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) } func testManifestStorage(t *testing.T, options ...RegistryOption) { @@ -71,7 +67,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { if err != nil { t.Fatal(err) } - equalSignatures := env.registry.(*registry).schema1SignaturesEnabled m := schema1.Manifest{ Versioned: manifest.Versioned{ @@ -175,12 +170,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) } - if equalSignatures { - if !reflect.DeepEqual(fetchedManifest, sm) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest.Manifest, sm.Manifest) - } - } - _, pl, err := fetchedManifest.Payload() if err != nil { t.Fatalf("error getting payload %#v", err) @@ -223,12 +212,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) } - if equalSignatures { - if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) - } - } - sigs, err := fetchedJWS.Signatures() if err != nil { t.Fatalf("unable to extract signatures: %v", err) @@ -285,17 +268,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { t.Fatalf("unexpected error verifying manifest: %v", err) } - // Assemble our payload and two signatures to get what we expect! - expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0]) - if err != nil { - t.Fatalf("unexpected error merging jws: %v", err) - } - - expectedSigs, err := expectedJWS.Signatures() - if err != nil { - t.Fatalf("unexpected error getting expected signatures: %v", err) - } - _, pl, err = fetched.Payload() if err != nil { t.Fatalf("error getting payload %#v", err) @@ -315,19 +287,6 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { t.Fatalf("payloads are not equal") } - if equalSignatures { - receivedSigs, err := receivedJWS.Signatures() - if err != nil { - t.Fatalf("error getting signatures: %v", err) - } - - for i, sig := range receivedSigs { - if !bytes.Equal(sig, expectedSigs[i]) { - t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) - } - } - } - // Test deleting manifests err = ms.Delete(ctx, dgst) if err != nil { diff --git a/docs/storage/paths.go b/docs/storage/paths.go index 8985f043..1b142b88 100644 --- a/docs/storage/paths.go +++ b/docs/storage/paths.go @@ -30,8 +30,6 @@ const ( // revisions // -> // -> link -// -> signatures -// //link // tags/ // -> current/link // -> index @@ -62,8 +60,7 @@ const ( // // The third component of the repository directory is the manifests store, // which is made up of a revision store and tag store. Manifests are stored in -// the blob store and linked into the revision store. Signatures are separated -// from the manifest payload data and linked into the blob store, as well. +// the blob store and linked into the revision store. // While the registry can save all revisions of a manifest, no relationship is // implied as to the ordering of changes to a manifest. 
The tag store provides // support for name, tag lookups of manifests, using "current/link" under a @@ -77,8 +74,6 @@ const ( // manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ // manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// // manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link -// manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/ -// manifestSignatureLinkPathSpec: /v2/repositories//_manifests/revisions///signatures///link // // Tags: // @@ -148,33 +143,6 @@ func pathFor(spec pathSpec) (string, error) { } return path.Join(root, "link"), nil - case manifestSignaturesPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - return path.Join(root, "signatures"), nil - case manifestSignatureLinkPathSpec: - root, err := pathFor(manifestSignaturesPathSpec{ - name: v.name, - revision: v.revision, - }) - - if err != nil { - return "", err - } - - signatureComponents, err := digestPathComponents(v.signature, false) - if err != nil { - return "", err - } - - return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil case manifestTagsPathSpec: return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil case manifestTagPathSpec: @@ -325,26 +293,6 @@ type manifestRevisionLinkPathSpec struct { func (manifestRevisionLinkPathSpec) pathSpec() {} -// manifestSignaturesPathSpec describes the path components for the directory -// containing all the signatures for the target blob. Entries are named with -// the underlying key id. -type manifestSignaturesPathSpec struct { - name string - revision digest.Digest -} - -func (manifestSignaturesPathSpec) pathSpec() {} - -// manifestSignatureLinkPathSpec describes the path components used to look up -// a signature file by the hash of its blob. -type manifestSignatureLinkPathSpec struct { - name string - revision digest.Digest - signature digest.Digest -} - -func (manifestSignatureLinkPathSpec) pathSpec() {} - // manifestTagsPathSpec describes the path elements required to point to the // manifest tags directory. 
type manifestTagsPathSpec struct { diff --git a/docs/storage/paths_test.go b/docs/storage/paths_test.go index 91004bd4..f739552a 100644 --- a/docs/storage/paths_test.go +++ b/docs/storage/paths_test.go @@ -26,21 +26,6 @@ func TestPathMapper(t *testing.T) { }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, - { - spec: manifestSignatureLinkPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", - }, - { - spec: manifestSignaturesPathSpec{ - name: "foo/bar", - revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures", - }, { spec: manifestTagsPathSpec{ name: "foo/bar", @@ -113,7 +98,7 @@ func TestPathMapper(t *testing.T) { // Add a few test cases to ensure we cover some errors // Specify a path that requires a revision and get a digest validation error. - badpath, err := pathFor(manifestSignaturesPathSpec{ + badpath, err := pathFor(manifestRevisionPathSpec{ name: "foo/bar", }) diff --git a/docs/storage/registry.go b/docs/storage/registry.go index 3fe4ac68..94034b26 100644 --- a/docs/storage/registry.go +++ b/docs/storage/registry.go @@ -18,7 +18,6 @@ type registry struct { blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool resumableDigestEnabled bool - schema1SignaturesEnabled bool schema1SigningKey libtrust.PrivateKey blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory } @@ -47,17 +46,8 @@ func DisableDigestResumption(registry *registry) error { return nil } -// DisableSchema1Signatures is a functional option for NewRegistry. It disables -// signature storage and ensures all schema1 manifests will only be returned -// with a signature from a provided signing key. -func DisableSchema1Signatures(registry *registry) error { - registry.schema1SignaturesEnabled = false - return nil -} - // Schema1SigningKey returns a functional option for NewRegistry. It sets the -// signing key for adding a signature to all schema1 manifests. This should be -// used in conjunction with disabling signature store. +// key for signing all schema1 manifests. 
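RegistryOption values such as the one documented above compose with the RegisterOptions/GetRegistryOptions hook added earlier in this series. A sketch of how an out-of-tree middleware package might inject the new BlobDescriptorServiceFactory option; `auditFactory` is a hypothetical stand-in, and the interface shape is inferred from the registry.go hunk:

```go
package mymiddleware

import (
	"github.com/docker/distribution"
	registrymiddleware "github.com/docker/distribution/registry/middleware/registry"
	"github.com/docker/distribution/registry/storage"
)

// auditFactory wraps every BlobDescriptorService the registry hands out.
type auditFactory struct{}

func (auditFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService {
	// A real middleware would return a wrapper that enforces access control
	// or records stats before delegating to svc; here we pass it through.
	return svc
}

func init() {
	// RegisterOptions queues the option; NewApp picks it up through
	// GetRegistryOptions before appending configuration-based options.
	registrymiddleware.RegisterOptions(storage.BlobDescriptorServiceFactory(auditFactory{}))
}
```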
func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { return func(registry *registry) error { registry.schema1SigningKey = key @@ -116,9 +106,8 @@ func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, option statter: statter, pathFn: bs.path, }, - statter: statter, - resumableDigestEnabled: true, - schema1SignaturesEnabled: true, + statter: statter, + resumableDigestEnabled: true, } for _, option := range options { @@ -231,11 +220,6 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M ctx: ctx, repository: repo, blobStore: blobStore, - signatures: &signatureStore{ - ctx: ctx, - repository: repo, - blobStore: repo.blobStore, - }, }, schema2Handler: &schema2ManifestHandler{ ctx: ctx, diff --git a/docs/storage/signaturestore.go b/docs/storage/signaturestore.go deleted file mode 100644 index 2940e041..00000000 --- a/docs/storage/signaturestore.go +++ /dev/null @@ -1,131 +0,0 @@ -package storage - -import ( - "path" - "sync" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -type signatureStore struct { - repository *repository - blobStore *blobStore - ctx context.Context -} - -func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { - signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Named().Name(), - revision: dgst, - }) - - if err != nil { - return nil, err - } - - // Need to append signature digest algorithm to path to get all items. - // Perhaps, this should be in the pathMapper but it feels awkward. This - // can be eliminated by implementing listAll on drivers. - signaturesPath = path.Join(signaturesPath, "sha256") - - signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) - if err != nil { - return nil, err - } - - var wg sync.WaitGroup - type result struct { - index int - signature []byte - err error - } - ch := make(chan result) - - bs := s.linkedBlobStore(s.ctx, dgst) - for i, sigPath := range signaturePaths { - sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) - if err != nil { - context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) - continue - } - - wg.Add(1) - go func(idx int, sigdgst digest.Digest) { - defer wg.Done() - context.GetLogger(s.ctx). - Debugf("fetching signature %q", sigdgst) - - r := result{index: idx} - - if p, err := bs.Get(s.ctx, sigdgst); err != nil { - context.GetLogger(s.ctx). - Errorf("error fetching signature %q: %v", sigdgst, err) - r.err = err - } else { - r.signature = p - } - - ch <- r - }(i, sigdgst) - } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - // aggregrate the results - signatures := make([][]byte, len(signaturePaths)) -loop: - for { - select { - case result := <-ch: - signatures[result.index] = result.signature - if result.err != nil && err == nil { - // only set the first one. - err = result.err - } - case <-done: - break loop - } - } - - return signatures, err -} - -func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { - bs := s.linkedBlobStore(s.ctx, dgst) - for _, signature := range signatures { - if _, err := bs.Put(s.ctx, "application/json", signature); err != nil { - return err - } - } - return nil -} - -// linkedBlobStore returns the namedBlobStore of the signatures for the -// manifest with the given digest. Effectively, each signature link path -// layout is a unique linked blob store. 
-func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { - linkpath := func(name string, dgst digest.Digest) (string, error) { - return pathFor(manifestSignatureLinkPathSpec{ - name: name, - revision: revision, - signature: dgst, - }) - - } - - return &linkedBlobStore{ - ctx: ctx, - repository: s.repository, - blobStore: s.blobStore, - blobAccessController: &linkedBlobStatter{ - blobStore: s.blobStore, - repository: s.repository, - linkPathFns: []linkPathFunc{linkpath}, - }, - linkPathFns: []linkPathFunc{linkpath}, - } -} diff --git a/docs/storage/signedmanifesthandler.go b/docs/storage/signedmanifesthandler.go index 8e13dd93..df6369f3 100644 --- a/docs/storage/signedmanifesthandler.go +++ b/docs/storage/signedmanifesthandler.go @@ -18,7 +18,6 @@ type signedManifestHandler struct { repository *repository blobStore *linkedBlobStore ctx context.Context - signatures *signatureStore } var _ ManifestHandler = &signedManifestHandler{} @@ -30,13 +29,6 @@ func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dige signatures [][]byte err error ) - if ms.repository.schema1SignaturesEnabled { - // Fetch the signatures for the manifest - signatures, err = ms.signatures.Get(dgst) - if err != nil { - return nil, err - } - } jsig, err := libtrust.NewJSONSignature(content, signatures...) if err != nil { @@ -47,8 +39,6 @@ func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dige if err := jsig.Sign(ms.repository.schema1SigningKey); err != nil { return nil, err } - } else if !ms.repository.schema1SignaturesEnabled { - return nil, fmt.Errorf("missing signing key with signature store disabled") } // Extract the pretty JWS @@ -90,18 +80,6 @@ func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution. return "", err } - if ms.repository.schema1SignaturesEnabled { - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return "", err - } - - if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { - return "", err - } - } - return revision.Digest, nil } From 6d0db0e2dd78975a6c75b5186e558bb7e9f2daa9 Mon Sep 17 00:00:00 2001 From: allencloud Date: Thu, 2 Jun 2016 13:31:13 +0800 Subject: [PATCH 489/501] fix typos Signed-off-by: allencloud --- docs/auth/htpasswd/htpasswd.go | 2 +- docs/client/errors.go | 2 +- docs/handlers/api_test.go | 2 +- docs/handlers/helpers.go | 2 +- docs/registry.go | 2 +- docs/storage/driver/middleware/cloudfront/middleware.go | 2 +- docs/storage/filereader_test.go | 2 +- docs/storage/linkedblobstore.go | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/auth/htpasswd/htpasswd.go b/docs/auth/htpasswd/htpasswd.go index 494ad0a7..8e4f6167 100644 --- a/docs/auth/htpasswd/htpasswd.go +++ b/docs/auth/htpasswd/htpasswd.go @@ -46,7 +46,7 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err // parseHTPasswd parses the contents of htpasswd. This will read all the // entries in the file, whether or not they are needed. An error is returned -// if an syntax errors are encountered or if the reader fails. +// if a syntax errors are encountered or if the reader fails. 
func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { entries := map[string][]byte{} scanner := bufio.NewScanner(rd) diff --git a/docs/client/errors.go b/docs/client/errors.go index adbaacf4..7606d0c9 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -11,7 +11,7 @@ import ( "github.com/docker/distribution/registry/api/errcode" ) -// ErrNoErrorsInBody is returned when a HTTP response body parses to an empty +// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty // errcode.Errors slice. var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 01fd4f4c..076207ed 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -926,7 +926,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name } // TODO(stevvooe): Add a test case where we take a mostly valid registry, - // tamper with the content and ensure that we get a unverified manifest + // tamper with the content and ensure that we get an unverified manifest // error. // Push 2 random layers diff --git a/docs/handlers/helpers.go b/docs/handlers/helpers.go index b56c1566..dac4f7a8 100644 --- a/docs/handlers/helpers.go +++ b/docs/handlers/helpers.go @@ -20,7 +20,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { }) } -// copyFullPayload copies the payload of a HTTP request to destWriter. If it +// copyFullPayload copies the payload of an HTTP request to destWriter. If it // receives less content than expected, and the client disconnected during the // upload, it avoids sending a 400 error to keep the logs cleaner. func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { diff --git a/docs/registry.go b/docs/registry.go index a1ba3b1a..aec6a030 100644 --- a/docs/registry.go +++ b/docs/registry.go @@ -267,7 +267,7 @@ func logLevel(level configuration.Loglevel) log.Level { return l } -// panicHandler add a HTTP handler to web app. The handler recover the happening +// panicHandler add an HTTP handler to web app. The handler recover the happening // panic. logrus.Panic transmits panic message to pre-config log hooks, which is // defined in config.yml. func panicHandler(handler http.Handler) http.Handler { diff --git a/docs/storage/driver/middleware/cloudfront/middleware.go b/docs/storage/driver/middleware/cloudfront/middleware.go index 9162c09d..b0618d1a 100644 --- a/docs/storage/driver/middleware/cloudfront/middleware.go +++ b/docs/storage/driver/middleware/cloudfront/middleware.go @@ -18,7 +18,7 @@ import ( storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) -// cloudFrontStorageMiddleware provides an simple implementation of layerHandler that +// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that // constructs temporary signed CloudFront URLs from the storagedriver layer URL, // then issues HTTP Temporary Redirects to this CloudFront content URL. type cloudFrontStorageMiddleware struct { diff --git a/docs/storage/filereader_test.go b/docs/storage/filereader_test.go index 774a864b..f43873b3 100644 --- a/docs/storage/filereader_test.go +++ b/docs/storage/filereader_test.go @@ -183,7 +183,7 @@ func TestFileReaderNonExistentFile(t *testing.T) { // conditions that can arise when reading a layer. 
func TestFileReaderErrors(t *testing.T) { // TODO(stevvooe): We need to cover error return types, driven by the - // errors returned via the HTTP API. For now, here is a incomplete list: + // errors returned via the HTTP API. For now, here is an incomplete list: // // 1. Layer Not Found: returned when layer is not found or access is // denied. diff --git a/docs/storage/linkedblobstore.go b/docs/storage/linkedblobstore.go index 68a347b4..d254bbb8 100644 --- a/docs/storage/linkedblobstore.go +++ b/docs/storage/linkedblobstore.go @@ -35,7 +35,7 @@ type linkedBlobStore struct { // control the repository blob link set to which the blob store // dispatches. This is required because manifest and layer blobs have not // yet been fully merged. At some point, this functionality should be - // removed an the blob links folder should be merged. The first entry is + // removed the blob links folder should be merged. The first entry is // treated as the "canonical" link location and will be used for writes. linkPathFns []linkPathFunc From 4e09e1b6589cc362e2c96857447d4226416c1573 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Wed, 8 Jun 2016 10:19:15 +0200 Subject: [PATCH 490/501] registry: use const for status code 429 Signed-off-by: Antonio Murdaca --- docs/api/errcode/register.go | 5 +---- docs/client/errors.go | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go index 7489e84f..71cf6f7a 100644 --- a/docs/api/errcode/register.go +++ b/docs/api/errcode/register.go @@ -71,10 +71,7 @@ var ( Message: "too many requests", Description: `Returned when a client attempts to contact a service too many times`, - // FIXME: go1.5 doesn't export http.StatusTooManyRequests while - // go1.6 does. Update the hardcoded value to the constant once - // Docker updates golang version to 1.6. - HTTPStatusCode: 429, + HTTPStatusCode: http.StatusTooManyRequests, }) ) diff --git a/docs/client/errors.go b/docs/client/errors.go index adbaacf4..804e69e0 100644 --- a/docs/client/errors.go +++ b/docs/client/errors.go @@ -54,10 +54,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error { switch statusCode { case http.StatusUnauthorized: return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - // FIXME: go1.5 doesn't export http.StatusTooManyRequests while - // go1.6 does. Update the hardcoded value to the constant once - // Docker updates golang version to 1.6. - case 429: + case http.StatusTooManyRequests: return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) default: return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) From ec7c59138161119d406d16cf4fbecd8178a571c9 Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Wed, 8 Jun 2016 10:39:17 -0700 Subject: [PATCH 491/501] Clarify API documentation around catalog fetch behavior Signed-off-by: Richard Scothern --- docs/api/v2/descriptors.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index 58279994..fc42c1c4 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -1497,8 +1497,8 @@ var routeDescriptors = []RouteDescriptor{ Description: "Retrieve a sorted, json list of repositories available in the registry.", Requests: []RequestDescriptor{ { - Name: "Catalog Fetch Complete", - Description: "Request an unabridged list of repositories available.", + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. 
The implementation may impose a maximum limit and return a partial set with pagination links.", Successes: []ResponseDescriptor{ { Description: "Returns the unabridged list of repositories as a json response.", From f3ae941cca906a2b738a36df8c9442c7b2d2011a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 8 Jun 2016 17:02:29 -0700 Subject: [PATCH 492/501] Add option to get content digest from manifest get The client may need the content digest to delete a manifest using the digest used by the registry. Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/client/repository.go | 23 +++++++++++++++++++++++ docs/client/repository_test.go | 29 ++++++++++++++++++++++------- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 8cc5f7f9..323ab508 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -394,11 +394,26 @@ func (o etagOption) Apply(ms distribution.ManifestService) error { return fmt.Errorf("etag options is a client-only option") } +// ReturnContentDigest allows a client to set a the content digest on +// a successful request from the 'Docker-Content-Digest' header. This +// returned digest is represents the digest which the registry uses +// to refer to the content and can be used to delete the content. +func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { var ( digestOrTag string ref reference.Named err error + contentDgst *digest.Digest ) for _, option := range options { @@ -408,6 +423,8 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis if err != nil { return nil, err } + } else if opt, ok := option.(contentDigestOption); ok { + contentDgst = opt.digest } else { err := option.Apply(ms) if err != nil { @@ -450,6 +467,12 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } mt := resp.Header.Get("Content-Type") body, err := ioutil.ReadAll(resp.Body) diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 2faeb276..19b6ca2c 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -605,6 +605,14 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) } +func contentDigestString(mediatype string, content []byte) string { + if mediatype == schema1.MediaTypeSignedManifest { + m, _, _ := distribution.UnmarshalManifest(mediatype, content) + content = m.(*schema1.SignedManifest).Canonical + } + return digest.Canonical.FromBytes(content).String() +} + func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ @@ -615,9 +623,10 @@ func addTestManifest(repo 
From 5de53e3448da08dcf98b68b9478ccb5b648f14a5 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Fri, 10 Jun 2016 16:34:08 -0700 Subject: [PATCH 493/501] Update "Accept" header parsing for list values In Go's header parsing, the same header appearing multiple times results in multiple entries in the `r.Header[...]` slice, but Go does no further parsing beyond that (and in https://golang.org/cl/4528086 it was determined that until/unless the stdlib itself needs it, Go will not do so). The consequence here for parsing of `Accept:` headers is that we support the way Go outputs headers, but not all language HTTP libraries have a facility to output multiple headers instead of a single list header.
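For a concrete picture of the Go behavior this relies on, the following standalone sketch (illustrative, not part of the change) shows how repeated headers surface as separate slice entries while a combined header stays a single unparsed entry until it is split:

```
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	h := http.Header{}
	h.Add("Accept", "a/b; q=0.5, b/c") // one combined list value
	h.Add("Accept", "d/e")             // a second Accept line

	fmt.Println(len(h["Accept"])) // 2: one entry per header line, unparsed

	// Flattening both forms means splitting each entry on "," and
	// dropping any "; q=..." parameters, as the handler change below does.
	var mediaTypes []string
	for _, v := range h["Accept"] {
		for _, mt := range strings.Split(v, ",") {
			if i := strings.Index(mt, ";"); i >= 0 {
				mt = mt[:i]
			}
			mediaTypes = append(mediaTypes, strings.TrimSpace(mt))
		}
	}
	fmt.Println(mediaTypes) // [a/b b/c d/e]
}
```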
This change ensures that the following (valid) header blocks all parse to the same result for the purposes of what is being tested here: ``` Accept: a/b Accept: b/c Accept: d/e ``` ``` Accept: a/b; q=0.5, b/c Accept: d/e ``` ``` Accept: a/b; q=0.1, b/c; q=0.2, d/e; q=0.8 ``` Signed-off-by: Andrew "Tianon" Page --- docs/handlers/api_test.go | 4 ++-- docs/handlers/images.go | 20 ++++++++++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/docs/handlers/api_test.go b/docs/handlers/api_test.go index 076207ed..93585d45 100644 --- a/docs/handlers/api_test.go +++ b/docs/handlers/api_test.go @@ -1586,8 +1586,8 @@ func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) if err != nil { t.Fatalf("Error constructing request: %s", err) } - req.Header.Set("Accept", manifestlist.MediaTypeManifestList) - req.Header.Add("Accept", schema1.MediaTypeSignedManifest) + // multiple headers in mixed list format to ensure we parse correctly server-side + req.Header.Set("Accept", fmt.Sprintf(` %s ; q=0.8 , %s ; q=0.5 `, manifestlist.MediaTypeManifestList, schema1.MediaTypeSignedManifest)) req.Header.Add("Accept", schema2.MediaTypeManifest) resp, err = http.DefaultClient.Do(req) if err != nil { diff --git a/docs/handlers/images.go b/docs/handlers/images.go index dd2ed2c8..df7f869b 100644 --- a/docs/handlers/images.go +++ b/docs/handlers/images.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "net/http" + "strings" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" @@ -98,8 +99,23 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http supportsSchema2 := false supportsManifestList := false - if acceptHeaders, ok := r.Header["Accept"]; ok { - for _, mediaType := range acceptHeaders { + // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values + // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 + for _, acceptHeader := range r.Header["Accept"] { + // r.Header[...] is a slice in case the request contains the same header more than once + // if the header isn't set, we'll get the zero value, which "range" will handle gracefully + + // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 + for _, mediaType := range strings.Split(acceptHeader, ",") { + // remove "; q=..." 
if present + if i := strings.Index(mediaType, ";"); i >= 0 { + mediaType = mediaType[:i] + } + + // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") + mediaType = strings.TrimSpace(mediaType) + if mediaType == schema2.MediaTypeManifest { supportsSchema2 = true } From 9a27ea7323224ee6e58efd3b5153828dc063c873 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 13 Jun 2016 11:30:42 -0700 Subject: [PATCH 494/501] Add support for Let's Encrypt Add configuration and certificate manager to use letsencrypt Signed-off-by: Derek McGowan (github: dmcgowan) --- docs/registry.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/docs/registry.go b/docs/registry.go index aec6a030..559f724c 100644 --- a/docs/registry.go +++ b/docs/registry.go @@ -9,6 +9,8 @@ import ( "os" "time" + "rsc.io/letsencrypt" + log "github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus/formatters/logstash" "github.com/bugsnag/bugsnag-go" @@ -111,11 +113,10 @@ func (registry *Registry) ListenAndServe() error { return err } - if config.HTTP.TLS.Certificate != "" { + if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { tlsConf := &tls.Config{ ClientAuth: tls.NoClientCert, NextProtos: []string{"http/1.1"}, - Certificates: make([]tls.Certificate, 1), MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: []uint16{ @@ -130,9 +131,26 @@ func (registry *Registry) ListenAndServe() error { }, } - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) - if err != nil { - return err + if config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + if config.HTTP.TLS.Certificate != "" { + return fmt.Errorf("cannot specify both certificate and Let's Encrypt") + } + var m letsencrypt.Manager + if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { + return err + } + if !m.Registered() { + if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { + return err + } + } + tlsConf.GetCertificate = m.GetCertificate + } else { + tlsConf.Certificates = make([]tls.Certificate, 1) + tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) + if err != nil { + return err + } } if len(config.HTTP.TLS.ClientCAs) != 0 { From 7b97265d9551898e767c9c57e7bb2cc6a1606198 Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Mon, 7 Mar 2016 18:41:20 -0300 Subject: [PATCH 495/501] Expose EndpointType parameter in swift storage driver Signed-off-by: Cezar Sa Espinola --- docs/storage/driver/swift/swift.go | 2 ++ docs/storage/driver/swift/swift_test.go | 3 +++ 2 files changed, 5 insertions(+) diff --git a/docs/storage/driver/swift/swift.go b/docs/storage/driver/swift/swift.go index b72d0436..4191b8ba 100644 --- a/docs/storage/driver/swift/swift.go +++ b/docs/storage/driver/swift/swift.go @@ -72,6 +72,7 @@ type Parameters struct { AuthVersion int Container string Prefix string + EndpointType string InsecureSkipVerify bool ChunkSize int SecretKey string @@ -182,6 +183,7 @@ func New(params Parameters) (*Driver, error) { Domain: params.Domain, DomainId: params.DomainID, TrustId: params.TrustID, + EndpointType: swift.EndpointType(params.EndpointType), Transport: transport, ConnectTimeout: 60 * time.Second, Timeout: 15 * 60 * time.Second, diff --git a/docs/storage/driver/swift/swift_test.go b/docs/storage/driver/swift/swift_test.go index 655aa996..8979bd33 100644 --- a/docs/storage/driver/swift/swift_test.go +++ 
b/docs/storage/driver/swift/swift_test.go @@ -34,6 +34,7 @@ func init() { container string region string AuthVersion int + endpointType string insecureSkipVerify bool secretKey string accessKey string @@ -54,6 +55,7 @@ func init() { container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) + endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE") insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) secretKey = os.Getenv("SWIFT_SECRET_KEY") accessKey = os.Getenv("SWIFT_ACCESS_KEY") @@ -90,6 +92,7 @@ func init() { AuthVersion, container, root, + endpointType, insecureSkipVerify, defaultChunkSize, secretKey, From d2e5d5c22c8cb2d37c527a1f1eaf95299f13584b Mon Sep 17 00:00:00 2001 From: Richard Scothern Date: Mon, 13 Jun 2016 17:35:06 -0700 Subject: [PATCH 496/501] If resumable digest support is disabled, detect this when closing the blobwriter and allow the close to continue. Also update the name of the function. Signed-off-by: Richard Scothern --- docs/handlers/blobupload.go | 2 +- docs/storage/blobwriter.go | 6 +++--- docs/storage/blobwriter_nonresumable.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index aa9c9f4b..e4133ce8 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -77,7 +77,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if size := upload.Size(); size != buh.State.Offset { defer upload.Close() - ctxu.GetLogger(ctx).Infof("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) + ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offset: %d != %d", size, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) diff --git a/docs/storage/blobwriter.go b/docs/storage/blobwriter.go index 48ac8a75..668a6fc9 100644 --- a/docs/storage/blobwriter.go +++ b/docs/storage/blobwriter.go @@ -86,10 +86,10 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) return canonical, nil } -// Rollback the blob upload process, releasing any resources associated with +// Cancel the blob upload process, releasing any resources associated with // the writer and canceling the operation. func (bw *blobWriter) Cancel(ctx context.Context) error { - context.GetLogger(ctx).Debug("(*blobWriter).Rollback") + context.GetLogger(ctx).Debug("(*blobWriter).Cancel") if err := bw.fileWriter.Cancel(); err != nil { return err } @@ -142,7 +142,7 @@ func (bw *blobWriter) Close() error { return errors.New("blobwriter close after commit") } - if err := bw.storeHashState(bw.blobStore.ctx); err != nil { + if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { return err } diff --git a/docs/storage/blobwriter_nonresumable.go b/docs/storage/blobwriter_nonresumable.go index 39166876..32f13097 100644 --- a/docs/storage/blobwriter_nonresumable.go +++ b/docs/storage/blobwriter_nonresumable.go @@ -7,7 +7,7 @@ import ( ) // resumeHashAt is a noop when resumable digest support is disabled.
-func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { +func (bw *blobWriter) resumeDigest(ctx context.Context) error { return errResumableDigestNotAvailable } From 6eadd3f4dc3fedeebe231af218fe932654bdb905 Mon Sep 17 00:00:00 2001 From: bin liu Date: Wed, 22 Jun 2016 12:40:21 +0800 Subject: [PATCH 497/501] fix typos Signed-off-by: bin liu --- docs/api/errcode/register.go | 2 +- docs/storage/manifeststore.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api/errcode/register.go b/docs/api/errcode/register.go index 71cf6f7a..d1e8826c 100644 --- a/docs/api/errcode/register.go +++ b/docs/api/errcode/register.go @@ -55,7 +55,7 @@ var ( HTTPStatusCode: http.StatusForbidden, }) - // ErrorCodeUnavailable provides a common error to report unavialability + // ErrorCodeUnavailable provides a common error to report unavailability // of a service or endpoint. ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ Value: "UNAVAILABLE", diff --git a/docs/storage/manifeststore.go b/docs/storage/manifeststore.go index 68483c95..9e8065bb 100644 --- a/docs/storage/manifeststore.go +++ b/docs/storage/manifeststore.go @@ -123,7 +123,7 @@ func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest return "", fmt.Errorf("unrecognized manifest type %T", manifest) } -// Delete removes the revision of the specified manfiest. +// Delete removes the revision of the specified manifest. func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") return ms.blobStore.Delete(ctx, dgst) From 1c5cb12745e1497e67999283cdf89d9805c1079a Mon Sep 17 00:00:00 2001 From: Josh Chorlton Date: Mon, 27 Jun 2016 17:39:25 -0700 Subject: [PATCH 498/501] fixed s3 Delete bug due to read-after-delete inconsistency Signed-off-by: Josh Chorlton --- docs/storage/driver/s3-aws/s3.go | 49 ++++++++++++++++++--------- docs/storage/driver/s3-aws/s3_test.go | 33 ++++++++++++++++++ 2 files changed, 66 insertions(+), 16 deletions(-) diff --git a/docs/storage/driver/s3-aws/s3.go b/docs/storage/driver/s3-aws/s3.go index 902abeb4..1240ec17 100644 --- a/docs/storage/driver/s3-aws/s3.go +++ b/docs/storage/driver/s3-aws/s3.go @@ -561,45 +561,62 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e return d.Delete(ctx, sourcePath) } +func min(a, b int) int { + if a < b { + return a + } + return b +} + // Delete recursively deletes all objects stored at "path" and its subpaths. 
+// We must be careful since S3 does not guarantee read-after-delete consistency. func (d *driver) Delete(ctx context.Context, path string) error { - resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + listObjectsInput := &s3.ListObjectsInput{ Bucket: aws.String(d.Bucket), Prefix: aws.String(d.s3Path(path)), - }) - if err != nil || len(resp.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} } + for { + // list all the objects + resp, err := d.S3.ListObjects(listObjectsInput) - s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + // resp.Contents can only be empty on the first call + // if there were no more results to return after the first call, resp.IsTruncated would have been false + // and the loop would be exited without recalling ListObjects + if err != nil || len(resp.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } - for len(resp.Contents) > 0 { for _, key := range resp.Contents { s3Objects = append(s3Objects, &s3.ObjectIdentifier{ Key: key.Key, }) } + // resp.Contents must have at least one element or we would have returned not found + listObjectsInput.Marker = resp.Contents[len(resp.Contents)-1].Key + + // from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned" + // if everything has been returned, break + if resp.IsTruncated == nil || !*resp.IsTruncated { + break + } + } + + // need to chunk objects into groups of 1000 per s3 restrictions + total := len(s3Objects) + for i := 0; i < total; i += 1000 { _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ Bucket: aws.String(d.Bucket), Delete: &s3.Delete{ - Objects: s3Objects, + Objects: s3Objects[i:min(i+1000, total)], Quiet: aws.Bool(false), }, }) - if err != nil { - return nil - } - - resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(d.Bucket), - Prefix: aws.String(d.s3Path(path)), - }) if err != nil { return err } } - return nil } diff --git a/docs/storage/driver/s3-aws/s3_test.go b/docs/storage/driver/s3-aws/s3_test.go index bb64ccf4..70358763 100644 --- a/docs/storage/driver/s3-aws/s3_test.go +++ b/docs/storage/driver/s3-aws/s3_test.go @@ -203,3 +203,36 @@ func TestStorageClass(t *testing.T) { } } + +func TestOverThousandBlobs(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + ctx := context.Background() + for i := 0; i < 1005; i++ { + filename := "/thousandfiletest/file" + strconv.Itoa(i) + contents := []byte("contents") + err = standardDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + } + + // can't actually verify deletion because read-after-delete is inconsistent, but we can ensure no errors + err = standardDriver.Delete(ctx, "/thousandfiletest") + if err != nil { + t.Fatalf("unexpected error deleting thousand files: %v", err) + } +} From 9e211edc9dee82b5ca2deec9f5a996cf1e946e4d Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 28 Jun 2016 14:44:51 -0700 Subject: [PATCH 499/501] Changes the client Tags All() method to follow links This returns all tags even when the registry forces pagination.
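The essence of the change is the link-following loop sketched here (a distilled illustration, not the patch code itself; it assumes the registry sends absolute next-page URLs in RFC 5988 `Link: <url>; rel="next"` headers, with encoding/json, net/http, and strings imported, and elides non-2xx handling):

```
func allTags(client *http.Client, u string) ([]string, error) {
	var tags []string
	for {
		resp, err := client.Get(u)
		if err != nil {
			return tags, err
		}
		var page struct {
			Tags []string `json:"tags"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close() // close per page; a deferred close would accumulate across iterations
		if err != nil {
			return tags, err
		}
		tags = append(tags, page.Tags...)

		link := resp.Header.Get("Link")
		if link == "" {
			return tags, nil // no next page advertised
		}
		// "<url>; rel=\"next\"" -> url
		u = strings.Trim(strings.Split(link, ";")[0], " <>")
	}
}
```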
Signed-off-by: Brian Bland --- docs/client/repository.go | 40 +++++++++++-------- docs/client/repository_test.go | 73 ++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 16 deletions(-) diff --git a/docs/client/repository.go b/docs/client/repository.go index 323ab508..97312556 100644 --- a/docs/client/repository.go +++ b/docs/client/repository.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "time" "github.com/docker/distribution" @@ -213,28 +214,35 @@ func (t *tags) All(ctx context.Context) ([]string, error) { return tags, err } - resp, err := t.client.Get(u) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) + for { + resp, err := t.client.Get(u) if err != nil { return tags, err } + defer resp.Body.Close() - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) + if link := resp.Header.Get("Link"); link != "" { + u = strings.Trim(strings.Split(link, ";")[0], "<>") + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) } - tags = tagsResponse.Tags - return tags, nil } - return tags, HandleErrorResponse(resp) } func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { diff --git a/docs/client/repository_test.go b/docs/client/repository_test.go index 19b6ca2c..d945596b 100644 --- a/docs/client/repository_test.go +++ b/docs/client/repository_test.go @@ -3,6 +3,7 @@ package client import ( "bytes" "crypto/rand" + "encoding/json" "fmt" "io" "log" @@ -949,6 +950,78 @@ func TestManifestTags(t *testing.T) { // TODO(dmcgowan): Check for error cases } +func TestManifestTagsPaginated(t *testing.T) { + s := httptest.NewServer(http.NotFoundHandler()) + defer s.Close() + + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") + tagsList := []string{"tag1", "tag2", "funtag"} + var m testutil.RequestResponseMap + for i := 0; i < 3; i++ { + body, err := json.Marshal(map[string]interface{}{ + "name": "test.example.com/repo/tags/list", + "tags": []string{tagsList[i]}, + }) + if err != nil { + t.Fatal(err) + } + queryParams := make(map[string][]string) + if i > 0 { + queryParams["n"] = []string{"1"} + queryParams["last"] = []string{tagsList[i-1]} + } + headers := http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(body))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }) + if i < 2 { + headers.Set("Link", "<"+s.URL+"/v2/"+repo.Name()+"/tags/list?n=1&last="+tagsList[i]+`>; rel="next"`) + } + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + QueryParams: queryParams, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: body, + Headers: headers, + }, + }) + } + + s.Config.Handler = testutil.NewHandler(m) + + r, err := NewRepository(context.Background(), repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) + if err != nil { + t.Fatal(tags, err) + } + if len(tags) != 3 
{ + t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) + } + + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } +} + func TestManifestUnauthorized(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo") _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) From c8aba9b484f71c6b95ae6576c3e04b3d7a68e78f Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Fri, 1 Jul 2016 10:59:32 -0300 Subject: [PATCH 500/501] registry: avoid formatting errors with %#v Signed-off-by: Cezar Sa Espinola --- docs/handlers/blobupload.go | 2 +- docs/storage/blobstore.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/handlers/blobupload.go b/docs/handlers/blobupload.go index e4133ce8..3afb4739 100644 --- a/docs/handlers/blobupload.go +++ b/docs/handlers/blobupload.go @@ -246,7 +246,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) default: - ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) + ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } diff --git a/docs/storage/blobstore.go b/docs/storage/blobstore.go index 84f6660f..4274cc9e 100644 --- a/docs/storage/blobstore.go +++ b/docs/storage/blobstore.go @@ -64,7 +64,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr // content already present return desc, nil } else if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err) + context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) // real error, return it return distribution.Descriptor{}, err } From 1f3cc5912473c6565b0650aa2e61ca1c56d7986e Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 12 Jul 2016 12:18:54 -0600 Subject: [PATCH 501/501] Document TOOMANYREQUESTS error code Add entries with this error code in registry/api/v2/descriptors.go. Signed-off-by: Aaron Lehmann --- docs/api/v2/descriptors.go | 49 +++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/docs/api/v2/descriptors.go b/docs/api/v2/descriptors.go index fc42c1c4..9979abae 100644 --- a/docs/api/v2/descriptors.go +++ b/docs/api/v2/descriptors.go @@ -175,6 +175,27 @@ var ( errcode.ErrorCodeDenied, }, } + + tooManyRequestsDescriptor = ResponseDescriptor{ + Name: "Too Many Requests", + StatusCode: http.StatusTooManyRequests, + Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } ) const ( @@ -202,17 +223,6 @@ const ( ... ] }` - - unauthorizedErrorsBody = `{ - "errors:" [ - { - "code": "UNAUTHORIZED", - "message": "access to the requested resource is not authorized", - "detail": ... - }, - ... - ] -}` ) // APIDescriptor exports descriptions of the layout of the v2 registry API. 
@@ -391,6 +401,7 @@ var routeDescriptors = []RouteDescriptor{ StatusCode: http.StatusNotFound, }, unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -445,6 +456,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, { @@ -481,6 +493,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -535,6 +548,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -592,6 +606,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", @@ -661,6 +676,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, { Name: "Unknown Manifest", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", @@ -769,6 +785,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, { @@ -843,6 +860,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -909,6 +927,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -993,6 +1012,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, { @@ -1039,6 +1059,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, { @@ -1103,6 +1124,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -1175,6 +1197,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -1249,6 +1272,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, { @@ -1334,6 +1358,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -1424,6 +1449,7 @@ var routeDescriptors = []RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, }, @@ -1480,6 +1506,7 @@ var routeDescriptors = 
[]RouteDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, + tooManyRequestsDescriptor, }, }, },
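With TOOMANYREQUESTS now documented on these routes, clients can treat HTTP 429 as a retryable condition. A minimal client-side sketch (standard library only; the registry URL is a placeholder, and real Retry-After values may also be HTTP dates, which this sketch ignores):

```
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter picks a wait duration for a 429 response, honoring a
// delta-seconds Retry-After header when one is present.
func retryAfter(resp *http.Response, fallback time.Duration) time.Duration {
	if s := resp.Header.Get("Retry-After"); s != "" {
		if secs, err := strconv.Atoi(s); err == nil && secs >= 0 {
			return time.Duration(secs) * time.Second
		}
	}
	return fallback
}

func main() {
	resp, err := http.Get("https://registry.example.com/v2/") // placeholder registry
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusTooManyRequests {
		fmt.Println("throttled; retry in", retryAfter(resp, 2*time.Second))
	}
}
```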