context: remove definition of Context

Back in the before time, the best practices surrounding usage of Context
weren't quite worked out. We defined our own type to make usage easier.
As this package was used elsewhere, it became more and more
challenging to integrate with the forked `Context` type. Now that it is
available in the standard library, we can just use that one directly.

To make usage more consistent, we now use `dcontext` when referring to
the distribution context package.
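
As a small sketch of the resulting convention (the `example` package and
`serve` function below are hypothetical, not part of this change): the
standard library `context` is imported unqualified, while the
distribution package is imported under the `dcontext` alias for helpers
such as `GetLogger`:

    package example

    import (
        "context" // standard library Context

        // distribution context helpers, aliased so the name does not
        // collide with the standard library package
        dcontext "github.com/docker/distribution/context"
    )

    // serve illustrates the split: context.Context flows through the API,
    // while dcontext supplies the logging helpers.
    func serve(ctx context.Context) {
        dcontext.GetLogger(ctx).Infof("handling request")
    }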

Signed-off-by: Stephen J Day <stephen.day@docker.com>
Stephen J Day 2017-08-11 15:31:16 -07:00
parent 7a8efe719e
commit 9c88801a12
GPG key ID: 67B3DED84EDC823F
90 changed files with 396 additions and 373 deletions

View file

@@ -1,6 +1,7 @@
 package proxy

 import (
+	"context"
 	"io"
 	"net/http"
 	"strconv"
@@ -8,7 +9,7 @@ import (
 	"time"

 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/proxy/scheduler"
 	"github.com/opencontainers/go-digest"
@@ -116,7 +117,7 @@ func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) e
 func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
 	served, err := pbs.serveLocal(ctx, w, r, dgst)
 	if err != nil {
-		context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
+		dcontext.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
 		return err
 	}
@@ -140,12 +141,12 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter,
 	go func(dgst digest.Digest) {
 		if err := pbs.storeLocal(ctx, dgst); err != nil {
-			context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
+			dcontext.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
 		}

 		blobRef, err := reference.WithDigest(pbs.repositoryName, dgst)
 		if err != nil {
-			context.GetLogger(ctx).Errorf("Error creating reference: %s", err)
+			dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err)
 			return
 		}

View file

@@ -1,6 +1,7 @@
 package proxy

 import (
+	"context"
 	"io/ioutil"
 	"math/rand"
 	"net/http"
@@ -10,7 +11,6 @@ import (
 	"time"

 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/proxy/scheduler"
 	"github.com/docker/distribution/registry/storage"

View file

@@ -1,10 +1,11 @@
 package proxy

 import (
+	"context"
 	"time"

 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/proxy/scheduler"
 	"github.com/opencontainers/go-digest"
@@ -72,7 +73,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
 	// Schedule the manifest blob for removal
 	repoBlob, err := reference.WithDigest(pms.repositoryName, dgst)
 	if err != nil {
-		context.GetLogger(ctx).Errorf("Error creating reference: %s", err)
+		dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err)
 		return nil, err
 	}

View file

@@ -1,12 +1,12 @@
 package proxy

 import (
+	"context"
 	"io"
 	"sync"
 	"testing"

 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/reference"

View file

@@ -1,6 +1,7 @@
 package proxy

 import (
+	"context"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -8,7 +9,7 @@ import (
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/configuration"
-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client/auth"
@@ -218,7 +219,7 @@ func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error
 		return err
 	}

-	context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm)
+	dcontext.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm)
 	return nil
 }

View file

@@ -1,8 +1,9 @@
 package proxy

 import (
+	"context"
+
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 )

 // proxyTagService supports local and remote lookup of tags.

View file

@@ -1,13 +1,13 @@
 package proxy

 import (
+	"context"
 	"reflect"
 	"sort"
 	"sync"
 	"testing"

 	"github.com/docker/distribution"
-	"github.com/docker/distribution/context"
 )

 type mockTagStore struct {

View file

@@ -1,12 +1,13 @@
 package scheduler

 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"sync"
 	"time"

-	"github.com/docker/distribution/context"
+	dcontext "github.com/docker/distribution/context"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/storage/driver"
 )
@@ -120,7 +121,7 @@ func (ttles *TTLExpirationScheduler) Start() error {
 		return fmt.Errorf("Scheduler already started")
 	}

-	context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
+	dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
 	ttles.stopped = false

 	// Start timer for each deserialized entry
@@ -142,7 +143,7 @@ func (ttles *TTLExpirationScheduler) Start() error {
 				err := ttles.writeState()
 				if err != nil {
-					context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+					dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
 				} else {
 					ttles.indexDirty = false
 				}
@@ -163,7 +164,7 @@ func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duratio
 		Expiry:    time.Now().Add(ttl),
 		EntryType: eType,
 	}
-	context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
+	dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now()))
 	if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil {
 		oldEntry.timer.Stop()
 	}
@@ -193,10 +194,10 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.
 		ref, err := reference.Parse(entry.Key)
 		if err == nil {
 			if err := f(ref); err != nil {
-				context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
+				dcontext.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
 			}
 		} else {
-			context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
+			dcontext.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
 		}

 		delete(ttles.entries, entry.Key)
@@ -210,7 +211,7 @@ func (ttles *TTLExpirationScheduler) Stop() {
 	defer ttles.Unlock()

 	if err := ttles.writeState(); err != nil {
-		context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
+		dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
 	}

 	for _, entry := range ttles.entries {