forked from TrueCloudLab/distribution

Merge pull request #713 from stevvooe/descriptor-size-field
Use "Size" field to describe blobs over "Length"

Commit 5e9d0702a1. 14 changed files with 63 additions and 42 deletions.

blobs.go (4 changes)
@@ -49,8 +49,8 @@ type Descriptor struct {
 	// encoded as utf-8.
 	MediaType string `json:"mediaType,omitempty"`
 
-	// Length in bytes of content.
-	Length int64 `json:"length,omitempty"`
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
 
 	// Digest uniquely identifies the content. A byte stream can be verified
 	// against against this digest.
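For orientation, here is a minimal sketch of what the rename means for calling code, assuming the import paths used by the repository in this era; the digest value is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

func main() {
	// Callers now populate Size; the old Length field is gone from Descriptor.
	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      1024,
		Digest:    digest.Digest("sha256:0123456789abcdef0"),
	}

	// The JSON key follows the rename: "size" rather than "length".
	p, err := json.Marshal(desc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(p)) // roughly: {"mediaType":"application/octet-stream","size":1024,"digest":"sha256:0123456789abcdef0"}
}
```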
@@ -84,8 +84,9 @@ manifest:
          "action": "push",
          "target": {
             "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
-            "length": 1,
+            "size": 1,
             "digest": "sha256:0123456789abcdef0",
+            "length": 1,
             "repository": "library/test",
             "url": "http://example.com/v2/library/test/manifests/latest"
          },
@@ -105,6 +106,11 @@ manifest:
    }
    ```
 
+> __NOTE(stevvooe):__ As of version 2.1, the `length` field for event targets
+> is being deprecated for the `size` field, bringing the target in line with
+> common nomenclature. Both will continue to be set for the foreseeable
+> future. Newer code should favor `size` but accept either.
+
 ## Envelope
 
 The envelope contains one or more events, with the following json structure:
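Following the note above, a webhook consumer can favor `size` while still accepting `length` from older registries. A hypothetical consumer-side sketch (the `eventTarget` type and `contentSize` helper are illustrative, not part of the registry API):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// eventTarget is a hypothetical consumer-side view of an event target. It
// decodes both the new "size" field and the deprecated "length" field.
type eventTarget struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`
	Length    int64  `json:"length"` // deprecated; still sent for now
	Digest    string `json:"digest"`
}

// contentSize favors "size" but falls back to "length", as the note advises.
func contentSize(t eventTarget) int64 {
	if t.Size > 0 {
		return t.Size
	}
	return t.Length
}

func main() {
	payload := []byte(`{"mediaType":"application/vnd.docker.distribution.manifest.v1+json","size":1,"length":1,"digest":"sha256:0123456789abcdef0"}`)

	var target eventTarget
	if err := json.Unmarshal(payload, &target); err != nil {
		panic(err)
	}
	fmt.Println(contentSize(target)) // 1
}
```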
@@ -97,6 +97,7 @@ func (b *bridge) createManifestEvent(action string, repo string, sm *manifest.Si
 	}
 
 	event.Target.Length = int64(len(p))
+	event.Target.Size = int64(len(p))
 	event.Target.Digest, err = digest.FromBytes(p)
 	if err != nil {
 		return nil, err
@@ -122,6 +123,7 @@ func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distri
 func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) {
 	event := b.createEvent(action)
 	event.Target.Descriptor = desc
+	event.Target.Length = desc.Size
 	event.Target.Repository = repo
 
 	var err error
@@ -54,6 +54,10 @@ type Event struct {
 
 		distribution.Descriptor
 
+		// Length in bytes of content. Same as Size field in Descriptor.
+		// Provided for backwards compatibility.
+		Length int64 `json:"length,omitempty"`
+
 		// Repository identifies the named repository.
 		Repository string `json:"repository,omitempty"`
 
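With the embedded Descriptor now carrying `Size` and the Target keeping a legacy `Length`, a serialized event target carries both keys during the deprecation window. A minimal sketch, assuming the notifications package layout shown above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/notifications"
)

func main() {
	// Populate a target the way bridge.go does: Size comes from the embedded
	// Descriptor, and Length mirrors it for backwards compatibility.
	var event notifications.Event
	event.Action = "push"
	event.Target.MediaType = "application/vnd.docker.distribution.manifest.v1+json"
	event.Target.Size = 1
	event.Target.Length = 1
	event.Target.Digest = "sha256:0123456789abcdef0"
	event.Target.Repository = "library/test"

	p, err := json.MarshalIndent(event, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(p)) // the "target" object includes both "size" and "length"
}
```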
@@ -22,8 +22,9 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
          "action": "push",
          "target": {
             "mediaType": "application/vnd.docker.distribution.manifest.v1+json",
-            "length": 1,
+            "size": 1,
             "digest": "sha256:0123456789abcdef0",
+            "length": 1,
             "repository": "library/test",
             "url": "http://example.com/v2/library/test/manifests/latest"
          },
@@ -47,8 +48,9 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
          "action": "push",
          "target": {
             "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
-            "length": 2,
+            "size": 2,
             "digest": "tarsum.v2+sha256:0123456789abcdef1",
+            "length": 2,
             "repository": "library/test",
             "url": "http://example.com/v2/library/test/manifests/latest"
          },
@@ -72,8 +74,9 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
          "action": "push",
          "target": {
             "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar",
-            "length": 3,
+            "size": 3,
             "digest": "tarsum.v2+sha256:0123456789abcdef2",
+            "length": 3,
             "repository": "library/test",
             "url": "http://example.com/v2/library/test/manifests/latest"
          },
@@ -115,7 +118,8 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 	manifestPush = prototype
 	manifestPush.ID = "asdf-asdf-asdf-asdf-0"
 	manifestPush.Target.Digest = "sha256:0123456789abcdef0"
-	manifestPush.Target.Length = int64(1)
+	manifestPush.Target.Length = 1
+	manifestPush.Target.Size = 1
 	manifestPush.Target.MediaType = manifest.ManifestMediaType
 	manifestPush.Target.Repository = "library/test"
 	manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"
@@ -125,6 +129,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 	layerPush0.ID = "asdf-asdf-asdf-asdf-1"
 	layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1"
 	layerPush0.Target.Length = 2
+	layerPush0.Target.Size = 2
 	layerPush0.Target.MediaType = layerMediaType
 	layerPush0.Target.Repository = "library/test"
 	layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"
@@ -134,6 +139,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 	layerPush1.ID = "asdf-asdf-asdf-asdf-2"
 	layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2"
 	layerPush1.Target.Length = 3
+	layerPush1.Target.Size = 3
 	layerPush1.Target.MediaType = layerMediaType
 	layerPush1.Target.Repository = "library/test"
 	layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest"
@@ -343,7 +343,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea
 		return nil, err
 	}
 
-	return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Length), nil
+	return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil
 }
 
 func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
@@ -366,7 +366,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 
 	desc := distribution.Descriptor{
 		MediaType: mediaType,
-		Length:    int64(len(p)),
+		Size:      int64(len(p)),
 		Digest:    dgstr.Digest(),
 	}
 
@@ -435,7 +435,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 
 		return distribution.Descriptor{
 			MediaType: resp.Header.Get("Content-Type"),
-			Length:    length,
+			Size:      length,
 			Digest:    dgst,
 		}, nil
 	case http.StatusNotFound:
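On the client side the rename shows up wherever a descriptor is consumed. A minimal usage sketch, assuming a `distribution.BlobStatter` (for example, a repository's blob store) and the import paths of this era; the statter and digest are supplied by the caller:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// printBlobSize stats a blob and reports its size; the statter and digest
// come from the caller and are placeholders here.
func printBlobSize(ctx context.Context, statter distribution.BlobStatter, dgst digest.Digest) error {
	desc, err := statter.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	// desc.Size replaces the old desc.Length field.
	fmt.Printf("blob %s is %d bytes (%s)\n", desc.Digest, desc.Size, desc.MediaType)
	return nil
}

func main() {}
```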
@@ -127,8 +127,8 @@ func TestBlobExists(t *testing.T) {
 		t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1)
 	}
 
-	if stat.Length != int64(len(b1)) {
-		t.Fatalf("Unexpected length: %d, expected %d", stat.Length, len(b1))
+	if stat.Size != int64(len(b1)) {
+		t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1))
 	}
 
 	// TODO(dmcgowan): Test error cases and ErrBlobUnknown case
@@ -244,14 +244,14 @@ func TestBlobUploadChunked(t *testing.T) {
 
 	blob, err := upload.Commit(ctx, distribution.Descriptor{
 		Digest: dgst,
-		Length: int64(len(b1)),
+		Size:   int64(len(b1)),
 	})
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if blob.Length != int64(len(b1)) {
-		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1))
+	if blob.Size != int64(len(b1)) {
+		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1))
 	}
 }
 
@@ -352,14 +352,14 @@ func TestBlobUploadMonolithic(t *testing.T) {
 
 	blob, err := upload.Commit(ctx, distribution.Descriptor{
 		Digest: dgst,
-		Length: int64(len(b1)),
+		Size:   int64(len(b1)),
 	})
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if blob.Length != int64(len(b1)) {
-		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1))
+	if blob.Size != int64(len(b1)) {
+		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1))
 	}
 }
 
@@ -178,7 +178,7 @@ func TestSimpleBlobRead(t *testing.T) {
 		t.Fatalf("error getting seeker size for random layer: %v", err)
 	}
 
-	descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Length: randomLayerSize}
+	descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
 	t.Logf("desc: %v", descBefore)
 
 	desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
@@ -186,8 +186,8 @@ func TestSimpleBlobRead(t *testing.T) {
 		t.Fatalf("error adding blob to blobservice: %v", err)
 	}
 
-	if desc.Length != randomLayerSize {
-		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Length, randomLayerSize)
+	if desc.Size != randomLayerSize {
+		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
 	}
 
 	rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
@@ -330,8 +330,8 @@ func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distributio
 
 	if nn, err := io.Copy(wr, rd); err != nil {
 		return distribution.Descriptor{}, err
-	} else if nn != desc.Length {
-		return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Length)
+	} else if nn != desc.Size {
+		return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size)
 	}
 
 	return wr.Commit(ctx, desc)
@@ -41,7 +41,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h
 		http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
 	case driver.ErrUnsupportedMethod:
 		// Fallback to serving the content directly.
-		br, err := newFileReader(ctx, bs.driver, path, desc.Length)
+		br, err := newFileReader(ctx, bs.driver, path, desc.Size)
 		if err != nil {
 			return err
 		}
@@ -61,7 +61,7 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h
 
 		if w.Header().Get("Content-Length") == "" {
 			// Set the content length if not already set.
-			w.Header().Set("Content-Length", fmt.Sprint(desc.Length))
+			w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
 		}
 
 		http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
@@ -50,7 +50,7 @@ func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution
 		return nil, err
 	}
 
-	return newFileReader(ctx, bs.driver, path, desc.Length)
+	return newFileReader(ctx, bs.driver, path, desc.Size)
 }
 
 // Put stores the content p in the blob store, calculating the digest. If the
@@ -81,7 +81,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr
 	// TODO(stevvooe): Write out mediatype here, as well.
 
 	return distribution.Descriptor{
-		Length: int64(len(p)),
+		Size: int64(len(p)),
 
 		// NOTE(stevvooe): The central blob store firewalls media types from
 		// other users. The caller should look this up and override the value
@@ -179,7 +179,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 	// mediatype that overrides the main one.
 
 	return distribution.Descriptor{
-		Length: fi.Size(),
+		Size: fi.Size(),
 
 		// NOTE(stevvooe): The central blob store firewalls media types from
 		// other users. The caller should look this up and override the value
@@ -148,7 +148,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 			// NOTE(stevvooe): We really don't care if the file is
 			// not actually present for the reader. We now assume
 			// that the desc length is zero.
-			desc.Length = 0
+			desc.Size = 0
 		default:
 			// Any other error we want propagated up the stack.
 			return distribution.Descriptor{}, err
@@ -161,14 +161,14 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 		bw.size = fi.Size()
 	}
 
-	if desc.Length > 0 {
-		if desc.Length != bw.size {
+	if desc.Size > 0 {
+		if desc.Size != bw.size {
 			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
 		}
 	} else {
 		// if provided 0 or negative length, we can assume caller doesn't know or
 		// care about length.
-		desc.Length = bw.size
+		desc.Size = bw.size
 	}
 
 	// TODO(stevvooe): This section is very meandering. Need to be broken down
@@ -216,7 +216,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 	}
 
 	// Read the file from the backend driver and validate it.
-	fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Length)
+	fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
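The validation above makes the size in a provisional descriptor optional: a zero or negative `Size` lets the writer adopt the number of bytes actually written, while a positive `Size` must match that count or the commit fails with `ErrBlobInvalidLength`. A minimal sketch of that calling convention, assuming the interfaces and import paths of this era; `commitBlob` is an illustrative helper:

```go
package main

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// commitBlob is an illustrative helper: the BlobWriter comes from a blob
// ingester's Create call, and p/dgst are supplied by the caller.
func commitBlob(ctx context.Context, wr distribution.BlobWriter, p []byte, dgst digest.Digest) (distribution.Descriptor, error) {
	if _, err := wr.Write(p); err != nil {
		return distribution.Descriptor{}, err
	}

	// Size may be left at zero, in which case validateBlob adopts the number
	// of bytes actually written; a positive Size must match exactly.
	return wr.Commit(ctx, distribution.Descriptor{
		Digest: dgst,
		Size:   int64(len(p)),
	})
}

func main() {}
```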
registry/storage/cache/cache.go (4 changes)

@@ -23,8 +23,8 @@ func ValidateDescriptor(desc distribution.Descriptor) error {
 		return err
 	}
 
-	if desc.Length < 0 {
-		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Length)
+	if desc.Size < 0 {
+		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
 	}
 
 	if desc.MediaType == "" {
registry/storage/cache/redis/redis.go (11 changes)

@@ -66,17 +66,20 @@ func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Di
 // stat provides an internal stat call that takes a connection parameter. This
 // allows some internal management of the connection scope.
 func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) {
-	reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype"))
+	reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype"))
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
 
-	if len(reply) < 2 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil
+	// NOTE(stevvooe): The "size" field used to be "length". We treat a
+	// missing "size" field here as an unknown blob, which causes a cache
+	// miss, effectively migrating the field.
+	if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil
 		return distribution.Descriptor{}, distribution.ErrBlobUnknown
 	}
 
 	var desc distribution.Descriptor
-	if _, err := redis.Scan(reply, &desc.Digest, &desc.Length, &desc.MediaType); err != nil {
+	if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil {
 		return distribution.Descriptor{}, err
 	}
 
@@ -104,7 +107,7 @@ func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst
 func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
 	if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst),
 		"digest", desc.Digest,
-		"length", desc.Length); err != nil {
+		"size", desc.Size); err != nil {
 		return err
 	}
 
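The redis change migrates cached descriptors lazily: entries written before this change only have a `length` hash field, so the new `HMGET ... size` read returns nil for that slot, the stat is treated as a cache miss, and the next `SetDescriptor` rewrites the entry under `size`. A rough sketch of that behavior using the same redigo calls as the diff; a reachable local redis, the redigo import path of the time, and the literal hash key are assumptions:

```go
package main

import (
	"fmt"

	"github.com/garyburd/redigo/redis"
)

func main() {
	// Assumes a local redis; the hash key below is illustrative (the real key
	// comes from blobDescriptorHashKey).
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	key := "blobs::sha256:0123456789abcdef0"

	// An entry written before this change only carries the "length" field.
	if _, err := conn.Do("HMSET", key, "digest", "sha256:0123456789abcdef0", "length", 1024); err != nil {
		panic(err)
	}

	// The new read path asks for "size"; that slot comes back nil, so stat
	// reports ErrBlobUnknown and the caller falls through to the backend.
	reply, err := redis.Values(conn.Do("HMGET", key, "digest", "size", "mediatype"))
	if err != nil {
		panic(err)
	}
	fmt.Println(reply[1] == nil) // true: treated as a cache miss

	// The next SetDescriptor rewrites the entry under the new field name.
	if _, err := conn.Do("HMSET", key, "digest", "sha256:0123456789abcdef0", "size", 1024); err != nil {
		panic(err)
	}
}
```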
registry/storage/cache/suite.go (6 changes)

@@ -35,14 +35,14 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context,
 
 	if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
 		Digest:    "sha384:abc",
-		Length:    10,
+		Size:      10,
 		MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
 		t.Fatalf("expected error with invalid digest: %v", err)
 	}
 
 	if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{
 		Digest:    "",
-		Length:    10,
+		Size:      10,
 		MediaType: "application/octet-stream"}); err == nil {
 		t.Fatalf("expected error setting value on invalid descriptor")
 	}
@@ -60,7 +60,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
 	localDigest := digest.Digest("sha384:abc")
 	expected := distribution.Descriptor{
 		Digest:    "sha256:abc",
-		Length:    10,
+		Size:      10,
 		MediaType: "application/octet-stream"}
 
 	cache, err := provider.RepositoryScoped("foo/bar")