Merge pull request #384 from liubin/fixtypos

fix some typos in source comments
Stephen Day 2015-04-17 11:55:19 -07:00
commit 57341dbb0d
7 changed files with 10 additions and 10 deletions

@@ -3,7 +3,7 @@
 // An access controller has a simple interface with a single `Authorized`
 // method which checks that a given request is authorized to perform one or
 // more actions on one or more resources. This method should return a non-nil
-// error if the requset is not authorized.
+// error if the request is not authorized.
 //
 // An implementation registers its access controller by name with a constructor
 // which accepts an options map for configuring the access controller.
@@ -50,7 +50,7 @@ type Resource struct {
 }
 
 // Access describes a specific action that is
-// requested or allowed for a given recource.
+// requested or allowed for a given resource.
 type Access struct {
 	Resource
 	Action string
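For context on the interface these comments describe, here is a minimal, self-contained sketch of how an access controller's Authorized check and the Access/Resource types might fit together. The allowAll controller, the Resource fields (Type, Name), and the exact Authorized signature are illustrative assumptions, not the registry's actual API.

```go
package main

import (
	"errors"
	"fmt"
)

// Resource and Access mirror the shapes in the hunk above; the Type/Name
// fields are assumed for illustration.
type Resource struct {
	Type string
	Name string
}

type Access struct {
	Resource
	Action string
}

// allowAll is a hypothetical access controller: it authorizes any request
// that names an action. The real interface also threads the request context
// through Authorized; that detail is elided here.
type allowAll struct{}

func (allowAll) Authorized(access ...Access) error {
	for _, a := range access {
		if a.Action == "" {
			return errors.New("auth: access request missing action")
		}
	}
	return nil // nil means the request is authorized
}

func main() {
	var ctrl allowAll
	err := ctrl.Authorized(Access{
		Resource: Resource{Type: "repository", Name: "library/ubuntu"},
		Action:   "pull",
	})
	fmt.Println("authorized:", err == nil)
}
```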

@@ -7,7 +7,7 @@ import (
 )
 
 // joseBase64UrlEncode encodes the given data using the standard base64 url
-// encoding format but with all trailing '=' characters ommitted in accordance
+// encoding format but with all trailing '=' characters omitted in accordance
 // with the jose specification.
 // http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
 func joseBase64UrlEncode(b []byte) string {
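The padding-stripping behavior the corrected comment describes can be sketched with the standard library: base64url-encode, then trim the trailing '=' characters. This is an illustrative reimplementation, not the file's actual body; base64.RawURLEncoding produces the same unpadded form directly.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// Sketch of the idea: standard base64url encoding with the trailing '='
// padding omitted, as the JOSE draft requires.
func joseBase64UrlEncode(b []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

func main() {
	fmt.Println(joseBase64UrlEncode([]byte("hello world")))
	// prints aGVsbG8gd29ybGQ -- no '=' padding at the end
}
```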

@@ -101,7 +101,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		}
 	case "ReadStream":
 		path, _ := request.Parameters["Path"].(string)
-		// Depending on serialization method, Offset may be convereted to any int/uint type
+		// Depending on serialization method, Offset may be converted to any int/uint type
 		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
 		reader, err := driver.ReadStream(path, offset)
 		var response ReadStreamResponse
@@ -116,9 +116,9 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		}
 	case "WriteStream":
 		path, _ := request.Parameters["Path"].(string)
-		// Depending on serialization method, Offset may be convereted to any int/uint type
+		// Depending on serialization method, Offset may be converted to any int/uint type
 		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
-		// Depending on serialization method, Size may be convereted to any int/uint type
+		// Depending on serialization method, Size may be converted to any int/uint type
 		size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int()
 		reader, _ := request.Parameters["Reader"].(io.ReadCloser)
 		err := driver.WriteStream(path, offset, size, reader)
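In both hunks the reflect-based conversion exists because the decoded Offset and Size parameters may arrive as any int/uint type, depending on the serialization method, and are normalized to int64 before use. A standalone sketch of the same trick (the helper name and parameter map are made up for illustration):

```go
package main

import (
	"fmt"
	"reflect"
)

// normalizeInt64 converts any integer-typed value to int64 via reflection,
// mirroring the Convert/Int calls in the hunks above.
func normalizeInt64(v interface{}) int64 {
	return reflect.ValueOf(v).Convert(reflect.TypeOf(int64(0))).Int()
}

func main() {
	params := map[string]interface{}{
		"Offset": uint32(4096), // could just as well arrive as int, int32, uint64, ...
		"Size":   int(1 << 20),
	}
	offset := normalizeInt64(params["Offset"])
	size := normalizeInt64(params["Size"])
	fmt.Printf("offset=%d size=%d (both int64)\n", offset, size)
}
```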

@@ -435,7 +435,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
 	c.Assert(err, check.IsNil)
 	c.Assert(received, check.DeepEquals, fullContents)
 
-	// Writing past size of file extends file (no offest error). We would like
+	// Writing past size of file extends file (no offset error). We would like
 	// to write chunk 4 one chunk length past chunk 3. It should be successful
 	// and the resulting file will be 5 chunks long, with a chunk of all
 	// zeros.
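The property the corrected comment relies on (writing at an offset past the current end extends the file, and the gap reads back as zeros) mirrors ordinary POSIX file semantics. A rough standalone analogy with a temporary os.File, offered as an analogy only, not the storage-driver API:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "gapdemo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Write three bytes, then write again four bytes past the current end.
	f.WriteAt([]byte("abc"), 0)
	f.WriteAt([]byte("xyz"), 7)

	buf := make([]byte, 10)
	f.ReadAt(buf, 0)
	fmt.Printf("%q\n", buf) // "abc\x00\x00\x00\x00xyz" -- the gap reads back as zeros
}
```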

@@ -336,7 +336,7 @@ func seekerSize(seeker io.ReadSeeker) (int64, error) {
 
 // createTestLayer creates a simple test layer in the provided driver under
 // tarsum dgst, returning the sha256 digest location. This is implemented
-// peicemeal and should probably be replaced by the uploader when it's ready.
+// piecemeal and should probably be replaced by the uploader when it's ready.
 func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
 	h := sha256.New()
 	rd := io.TeeReader(content, h)
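The io.TeeReader pattern at the end of this hunk, computing the sha256 digest while the content streams toward the driver, can be shown in isolation; io.Discard stands in for the real destination:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	content := strings.NewReader("example layer data")

	// Tee the stream through a sha256 hash so the digest is computed as a
	// side effect of copying the content to its destination.
	h := sha256.New()
	rd := io.TeeReader(content, h)

	n, err := io.Copy(io.Discard, rd) // stand-in for writing to the storage driver
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes, digest sha256:%x\n", n, h.Sum(nil))
}
```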

@@ -182,7 +182,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
 	}
 
 	if offset == int64(lw.resumableDigester.Len()) {
-		// State of digester is already at the requseted offset.
+		// State of digester is already at the requested offset.
 		return nil
 	}
 
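The check the corrected comment documents (do nothing when the digester's hash state already corresponds to the requested offset) can be sketched with a hypothetical counting digester; the actual resumable digester API is not visible in this hunk:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
)

// countingDigester is a made-up stand-in for a resumable digester: it tracks
// how many bytes it has hashed so a resumed upload can compare that count
// against the offset it needs to continue from.
type countingDigester struct {
	hash.Hash
	n int64
}

func (d *countingDigester) Write(p []byte) (int, error) {
	n, err := d.Hash.Write(p)
	d.n += int64(n)
	return n, err
}

func (d *countingDigester) Len() int64 { return d.n }

func main() {
	d := &countingDigester{Hash: sha256.New()}
	d.Write([]byte("already-hashed prefix"))

	offset := int64(len("already-hashed prefix"))
	if offset == d.Len() {
		// State of the digester is already at the requested offset;
		// nothing needs to be replayed before continuing.
		fmt.Println("resume: hash state matches offset", offset)
	}
}
```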

@@ -387,7 +387,7 @@ type layerLinkPathSpec struct {
 func (layerLinkPathSpec) pathSpec() {}
 
 // blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some heirachry for tarsum digests. Paths
+// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
 // should be "safe" before getting this far due to strict digest requirements
 // but we can add further path conversion here, if needed.
 var blobAlgorithmReplacer = strings.NewReplacer(
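strings.NewReplacer, which this hunk ends on, takes old/new pairs and applies all of them in a single pass over the input. The pairs below are assumed for illustration, since the real ones are cut off by the hunk, but they show how separators in an algorithm identifier could be turned into path hierarchy:

```go
package main

import (
	"fmt"
	"strings"
)

// Hypothetical replacer in the spirit of blobAlgorithmReplacer: map the
// separators that can appear in a tarsum/digest algorithm component to '/'
// so the algorithm contributes some hierarchy to the blob path.
var algorithmReplacer = strings.NewReplacer(
	"+", "/",
	".", "/",
	";", "/",
)

func main() {
	fmt.Println(algorithmReplacer.Replace("tarsum.v1+sha256"))
	// prints: tarsum/v1/sha256
}
```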