registry/{storage,handlers}: limit content sizes

Under certain circumstances, the use of `StorageDriver.GetContent` can
result in unbounded memory allocations. In particular, this happens when
accessing a layer through the manifests endpoint.

This problem is mitigated by setting a 4MB limit when accessing content
that may have been accepted from a user. In practice, this means
limiting the size of reads made through `BlobProvider.Get` by wrapping
`StorageDriver.GetContent` in a helper that uses `StorageDriver.Reader`
with a `limitReader` that returns an error once the limit is exceeded.

While mitigating this security issue, we also noticed that the size of
manifests uploaded to the registry is unlimited. We apply similar logic
to the request bodies of payloads that are fully buffered.

Signed-off-by: Stephen J Day <stephen.day@docker.com>
(cherry picked from commit 55ea440428)
Signed-off-by: Stephen J Day <stephen.day@docker.com>
Stephen J Day 2017-07-06 16:01:26 -07:00
parent 9bc9d212ec
commit 58d239d723
5 changed files with 91 additions and 13 deletions
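
The key design point in the change below is that a read which hits the cap must fail loudly rather than truncate: io.LimitReader would simply stop at the limit and hand back a short, silently corrupted blob, while the helper added in registry/storage/io.go returns an error instead. The following standalone sketch shows that difference; errLimitReader is a hypothetical stand-in written for illustration only and is not part of this commit.

package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// errLimitReader is a toy stand-in for the limitReader added below: it fails
// once more than n bytes have been read instead of silently stopping.
// (Unlike the real helper, this simple version also errors when the input is
// exactly n bytes long.)
type errLimitReader struct {
	r io.Reader
	n int64
}

func (l *errLimitReader) Read(p []byte) (int, error) {
	if l.n <= 0 {
		return 0, errors.New("read exceeds limit")
	}
	if int64(len(p)) > l.n {
		p = p[:l.n]
	}
	n, err := l.r.Read(p)
	l.n -= int64(n)
	return n, err
}

func main() {
	data := strings.Repeat("x", 10)

	// io.LimitReader truncates at 4 bytes and reports no error at all.
	b, err := ioutil.ReadAll(io.LimitReader(strings.NewReader(data), 4))
	fmt.Println(len(b), err) // 4 <nil>

	// An error-on-limit reader tells the caller the content was too large.
	_, err = ioutil.ReadAll(&errLimitReader{r: strings.NewReader(data), n: 4})
	fmt.Println(err) // read exceeds limit
}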


@@ -179,8 +179,8 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque
 	// TODO(dmcgowan): support Content-Range header to seek and write range
-	if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil {
-		// copyFullPayload reports the error if necessary
+	if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PATCH"); err != nil {
+		buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
 		return
 	}
@@ -218,8 +218,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
 		return
 	}
 
-	if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil {
-		// copyFullPayload reports the error if necessary
+	if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PUT"); err != nil {
+		buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
 		return
 	}


@@ -6,7 +6,6 @@ import (
 	"net/http"
 
 	ctxu "github.com/docker/distribution/context"
-	"github.com/docker/distribution/registry/api/errcode"
 )
 
 // closeResources closes all the provided resources after running the target
@@ -23,7 +22,9 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
 // copyFullPayload copies the payload of an HTTP request to destWriter. If it
 // receives less content than expected, and the client disconnected during the
 // upload, it avoids sending a 400 error to keep the logs cleaner.
-func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error {
+//
+// The copy will be limited to `limit` bytes, if limit is greater than zero.
+func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, context ctxu.Context, action string) error {
 	// Get a channel that tells us if the client disconnects
 	var clientClosed <-chan bool
 	if notifier, ok := responseWriter.(http.CloseNotifier); ok {
@@ -32,8 +33,13 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr
 		ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter)
 	}
 
+	var body = r.Body
+	if limit > 0 {
+		body = http.MaxBytesReader(responseWriter, body, limit)
+	}
+
 	// Read in the data, if any.
-	copied, err := io.Copy(destWriter, r.Body)
+	copied, err := io.Copy(destWriter, body)
 	if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
 		// Didn't receive as much content as expected. Did the client
 		// disconnect during the request? If so, avoid returning a 400
@@ -58,7 +64,6 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr
 	if err != nil {
 		ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err)
-		*errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err))
 		return err
 	}
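
For the fully buffered request bodies mentioned in the commit message, the limit > 0 branch above leans on the standard library's http.MaxBytesReader, which returns an error once the body grows past the cap. A minimal, standalone illustration of that behavior, using httptest and toy sizes rather than the registry handlers themselves:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// Stand-ins for the real ResponseWriter and request body used by
	// copyFullPayload; the 4-byte cap here plays the role of the 4MB limit.
	w := httptest.NewRecorder()
	body := ioutil.NopCloser(strings.NewReader("0123456789"))

	limited := http.MaxBytesReader(w, body, 4)
	if _, err := ioutil.ReadAll(limited); err != nil {
		fmt.Println(err) // http: request body too large
	}
}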


@@ -23,6 +23,7 @@ import (
 const (
 	defaultArch = "amd64"
 	defaultOS   = "linux"
+	maxManifestBodySize = 4 << 20
 )
 
 // imageManifestDispatcher takes the request context and builds the
@@ -240,8 +241,9 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http
 	}
 
 	var jsonBuf bytes.Buffer
-	if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil {
+	if err := copyFullPayload(w, r, &jsonBuf, maxManifestBodySize, imh, "image manifest PUT"); err != nil {
 		// copyFullPayload reports the error if necessary
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))
 		return
 	}


@@ -27,7 +27,7 @@ func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error
 		return nil, err
 	}
 
-	p, err := bs.driver.GetContent(ctx, bp)
+	p, err := getContent(ctx, bs.driver, bp)
 	if err != nil {
 		switch err.(type) {
 		case driver.PathNotFoundError:
@@ -37,7 +37,7 @@ func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error
 		return nil, err
 	}
 
-	return p, err
+	return p, nil
 }
 
 func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {

registry/storage/io.go (new file, 71 lines)

@@ -0,0 +1,71 @@
package storage

import (
	"errors"
	"io"
	"io/ioutil"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
)

const (
	maxBlobGetSize = 4 << 20
)

func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) {
	r, err := driver.Reader(ctx, p, 0)
	if err != nil {
		return nil, err
	}

	return readAllLimited(r, maxBlobGetSize)
}

func readAllLimited(r io.Reader, limit int64) ([]byte, error) {
	r = limitReader(r, limit)
	return ioutil.ReadAll(r)
}

// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader,
// this returns an error when the limit is reached.
func limitReader(r io.Reader, n int64) io.Reader {
	return &limitedReader{r: r, n: n}
}

// limitedReader implements a reader that errors when the limit is reached.
//
// Partially cribbed from net/http.MaxBytesReader.
type limitedReader struct {
	r   io.Reader // underlying reader
	n   int64     // max bytes remaining
	err error     // sticky error
}

func (l *limitedReader) Read(p []byte) (n int, err error) {
	if l.err != nil {
		return 0, l.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	// If they asked for a 32KB byte read but only 5 bytes are
	// remaining, no need to read 32KB. 6 bytes will answer the
	// question of whether we hit the limit or go past it.
	if int64(len(p)) > l.n+1 {
		p = p[:l.n+1]
	}
	n, err = l.r.Read(p)

	if int64(n) <= l.n {
		l.n -= int64(n)
		l.err = err
		return n, err
	}

	n = int(l.n)
	l.n = 0

	l.err = errors.New("storage: read exceeds limit")
	return n, l.err
}
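
A hypothetical test-style usage of the new reader (not part of the commit) makes the semantics concrete: a read within the limit completes normally, while anything larger fails instead of being silently truncated. A small limit stands in for maxBlobGetSize here:

package storage

import (
	"io/ioutil"
	"strings"
	"testing"
)

func TestLimitReader(t *testing.T) {
	// Within the limit: the full content comes back with no error.
	p, err := ioutil.ReadAll(limitReader(strings.NewReader("abc"), 10))
	if err != nil || string(p) != "abc" {
		t.Fatalf("unexpected result: %q, %v", p, err)
	}

	// Past the limit: the read fails rather than silently truncating.
	if _, err := ioutil.ReadAll(limitReader(strings.NewReader(strings.Repeat("x", 16)), 10)); err == nil {
		t.Fatal("expected an error once the limit is exceeded")
	}
}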