From 12e68998e101a43d6616ac950beb6e91e06b57b2 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Tue, 21 Oct 2014 13:25:04 -0700 Subject: [PATCH 001/165] + license --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 3f95694180729cf78629f4062617ac10132bec50 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 21 Oct 2014 15:02:20 -0700 Subject: [PATCH 002/165] Adds storage driver interface, tests, and two basic implementations --- .travis.yml | 5 + main/storagedriver/filesystem/filesystem.go | 26 ++ main/storagedriver/inmemory/inmemory.go | 10 + storagedriver/filesystem/filesystem.go | 173 ++++++++++ storagedriver/filesystem/filesystem_test.go | 24 ++ storagedriver/inmemory/inmemory.go | 147 ++++++++ storagedriver/inmemory/inmemory_test.go | 20 ++ storagedriver/ipc/client.go | 285 ++++++++++++++++ storagedriver/ipc/ipc.go | 83 +++++ storagedriver/ipc/server.go | 160 +++++++++ storagedriver/storagedriver.go | 34 ++ storagedriver/testsuites/testsuites.go | 353 ++++++++++++++++++++ 12 files changed, 1320 insertions(+) create mode 100644 .travis.yml create mode 100644 main/storagedriver/filesystem/filesystem.go create mode 100644 main/storagedriver/inmemory/inmemory.go create mode 100644 storagedriver/filesystem/filesystem.go create mode 100644 storagedriver/filesystem/filesystem_test.go create mode 100644 storagedriver/inmemory/inmemory.go create mode 100644 storagedriver/inmemory/inmemory_test.go create mode 100644 storagedriver/ipc/client.go create mode 100644 storagedriver/ipc/ipc.go create mode 100644 storagedriver/ipc/server.go create mode 100644 storagedriver/storagedriver.go create mode 100644 storagedriver/testsuites/testsuites.go diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..d48424c3 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: +- 1.3 +- tip diff --git a/main/storagedriver/filesystem/filesystem.go b/main/storagedriver/filesystem/filesystem.go new file mode 100644 index 00000000..8c0e2677 --- /dev/null +++ b/main/storagedriver/filesystem/filesystem.go @@ -0,0 +1,26 @@ +package main + +import ( + "encoding/json" + "os" + + "github.com/docker/docker-registry/storagedriver/filesystem" + "github.com/docker/docker-registry/storagedriver/ipc" +) + +func main() { + parametersBytes := []byte(os.Args[1]) + var parameters map[string]interface{} + err := json.Unmarshal(parametersBytes, ¶meters) + if err != nil { + panic(err) + } + rootDirectory := "/tmp/registry" + if parameters != nil { + rootDirParam, ok := parameters["RootDirectory"].(string) + if ok && rootDirParam != "" { + rootDirectory = rootDirParam + } + } + ipc.Server(filesystem.NewDriver(rootDirectory)) +} diff --git a/main/storagedriver/inmemory/inmemory.go b/main/storagedriver/inmemory/inmemory.go new file mode 100644 index 00000000..f55c8d5f --- /dev/null +++ b/main/storagedriver/inmemory/inmemory.go @@ -0,0 +1,10 @@ +package main + +import ( + "github.com/docker/docker-registry/storagedriver/inmemory" + 
"github.com/docker/docker-registry/storagedriver/ipc" +) + +func main() { + ipc.Server(inmemory.NewDriver()) +} diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/filesystem.go new file mode 100644 index 00000000..79106e37 --- /dev/null +++ b/storagedriver/filesystem/filesystem.go @@ -0,0 +1,173 @@ +package filesystem + +import ( + "io" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/docker/docker-registry/storagedriver" +) + +type FilesystemDriver struct { + rootDirectory string +} + +func NewDriver(rootDirectory string) *FilesystemDriver { + return &FilesystemDriver{rootDirectory} +} + +func (d *FilesystemDriver) subPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +func (d *FilesystemDriver) GetContent(path string) ([]byte, error) { + contents, err := ioutil.ReadFile(d.subPath(path)) + if err != nil { + return nil, storagedriver.PathNotFoundError{path} + } + return contents, nil +} + +func (d *FilesystemDriver) PutContent(subPath string, contents []byte) error { + fullPath := d.subPath(subPath) + parentDir := path.Dir(fullPath) + err := os.MkdirAll(parentDir, 0755) + if err != nil { + return err + } + + err = ioutil.WriteFile(fullPath, contents, 0644) + return err +} + +func (d *FilesystemDriver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { + file, err := os.OpenFile(d.subPath(path), os.O_RDONLY, 0644) + if err != nil { + return nil, err + } + + seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + if err != nil { + file.Close() + return nil, err + } else if seekPos < int64(offset) { + file.Close() + return nil, storagedriver.InvalidOffsetError{path, offset} + } + + return file, nil +} + +func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, reader io.ReadCloser) error { + defer reader.Close() + + resumableOffset, err := d.ResumeWritePosition(subPath) + if _, pathNotFound := err.(storagedriver.PathNotFoundError); err != nil && !pathNotFound { + return err + } + + if offset > resumableOffset { + return storagedriver.InvalidOffsetError{subPath, offset} + } + + fullPath := d.subPath(subPath) + parentDir := path.Dir(fullPath) + err = os.MkdirAll(parentDir, 0755) + if err != nil { + return err + } + + var file *os.File + if offset == 0 { + file, err = os.Create(fullPath) + } else { + file, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_APPEND, 0) + } + + if err != nil { + return err + } + defer file.Close() + + buf := make([]byte, 32*1024) + for { + bytesRead, er := reader.Read(buf) + if bytesRead > 0 { + bytesWritten, ew := file.WriteAt(buf[0:bytesRead], int64(offset)) + if bytesWritten > 0 { + offset += uint64(bytesWritten) + } + if ew != nil { + err = ew + break + } + if bytesRead != bytesWritten { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return err +} + +func (d *FilesystemDriver) ResumeWritePosition(subPath string) (uint64, error) { + fullPath := d.subPath(subPath) + + fileInfo, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return 0, err + } else if err != nil { + return 0, storagedriver.PathNotFoundError{subPath} + } + return uint64(fileInfo.Size()), nil +} + +func (d *FilesystemDriver) List(prefix string) ([]string, error) { + prefix = strings.TrimRight(prefix, "/") + fullPath := d.subPath(prefix) + + dir, err := os.Open(fullPath) + if err != nil { + return nil, err + } + + fileNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + + keys := make([]string, 0, 
len(fileNames)) + for _, fileName := range fileNames { + keys = append(keys, path.Join(prefix, fileName)) + } + + return keys, nil +} + +func (d *FilesystemDriver) Move(sourcePath string, destPath string) error { + err := os.Rename(d.subPath(sourcePath), d.subPath(destPath)) + return err +} + +func (d *FilesystemDriver) Delete(subPath string) error { + fullPath := d.subPath(subPath) + + _, err := os.Stat(fullPath) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + return storagedriver.PathNotFoundError{subPath} + } + + err = os.RemoveAll(fullPath) + return err +} diff --git a/storagedriver/filesystem/filesystem_test.go b/storagedriver/filesystem/filesystem_test.go new file mode 100644 index 00000000..c445e178 --- /dev/null +++ b/storagedriver/filesystem/filesystem_test.go @@ -0,0 +1,24 @@ +package filesystem + +import ( + "os" + "testing" + + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/testsuites" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +func init() { + rootDirectory := "/tmp/driver" + os.RemoveAll(rootDirectory) + + filesystemDriverConstructor := func() (storagedriver.StorageDriver, error) { + return NewDriver(rootDirectory), nil + } + testsuites.RegisterInProcessSuite(filesystemDriverConstructor) + testsuites.RegisterIPCSuite("filesystem", map[string]string{"RootDirectory": rootDirectory}) +} diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/inmemory.go new file mode 100644 index 00000000..ea44bb39 --- /dev/null +++ b/storagedriver/inmemory/inmemory.go @@ -0,0 +1,147 @@ +package inmemory + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "regexp" + "strings" + "sync" + + "github.com/docker/docker-registry/storagedriver" +) + +type InMemoryDriver struct { + storage map[string][]byte + mutex sync.RWMutex +} + +func NewDriver() *InMemoryDriver { + return &InMemoryDriver{storage: make(map[string][]byte)} +} + +func (d *InMemoryDriver) GetContent(path string) ([]byte, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + contents, ok := d.storage[path] + if !ok { + return nil, storagedriver.PathNotFoundError{path} + } + return contents, nil +} + +func (d *InMemoryDriver) PutContent(path string, contents []byte) error { + d.mutex.Lock() + defer d.mutex.Unlock() + d.storage[path] = contents + return nil +} + +func (d *InMemoryDriver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { + d.mutex.RLock() + defer d.mutex.RUnlock() + contents, err := d.GetContent(path) + if err != nil { + return nil, err + } else if len(contents) < int(offset) { + return nil, storagedriver.InvalidOffsetError{path, offset} + } + + src := contents[offset:] + buf := make([]byte, len(src)) + copy(buf, src) + return ioutil.NopCloser(bytes.NewReader(buf)), nil +} + +func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { + defer reader.Close() + d.mutex.RLock() + defer d.mutex.RUnlock() + + resumableOffset, err := d.ResumeWritePosition(path) + if err != nil { + return err + } + + if offset > resumableOffset { + return storagedriver.InvalidOffsetError{path, offset} + } + + contents, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + if offset > 0 { + contents = append(d.storage[path][0:offset], contents...) 
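+ // Note: append writes into the existing backing array when it has
+ // capacity, so the stored slice is truncated at offset and the new
+ // bytes overwrite (and extend past) the old tail in place.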
+ }
+
+ d.storage[path] = contents
+ return nil
+}
+
+func (d *InMemoryDriver) ResumeWritePosition(path string) (uint64, error) {
+ d.mutex.RLock()
+ defer d.mutex.RUnlock()
+ contents, ok := d.storage[path]
+ if !ok {
+ return 0, nil
+ }
+ return uint64(len(contents)), nil
+}
+
+func (d *InMemoryDriver) List(prefix string) ([]string, error) {
+ subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s/[^/]+", prefix))
+ if err != nil {
+ return nil, err
+ }
+
+ d.mutex.RLock()
+ defer d.mutex.RUnlock()
+ // use a map to collect unique keys
+ keySet := make(map[string]struct{})
+ for k := range d.storage {
+ if key := subPathMatcher.FindString(k); key != "" {
+ keySet[key] = struct{}{}
+ }
+ }
+
+ keys := make([]string, 0, len(keySet))
+ for k := range keySet {
+ keys = append(keys, k)
+ }
+ return keys, nil
+}
+
+func (d *InMemoryDriver) Move(sourcePath string, destPath string) error {
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ contents, ok := d.storage[sourcePath]
+ if !ok {
+ return storagedriver.PathNotFoundError{sourcePath}
+ }
+ d.storage[destPath] = contents
+ delete(d.storage, sourcePath)
+ return nil
+}
+
+func (d *InMemoryDriver) Delete(path string) error {
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+ subPaths := make([]string, 0)
+ for k := range d.storage {
+ if strings.HasPrefix(k, path) {
+ subPaths = append(subPaths, k)
+ }
+ }
+
+ if len(subPaths) == 0 {
+ return storagedriver.PathNotFoundError{path}
+ }
+
+ for _, subPath := range subPaths {
+ delete(d.storage, subPath)
+ }
+ return nil
+}
diff --git a/storagedriver/inmemory/inmemory_test.go b/storagedriver/inmemory/inmemory_test.go
new file mode 100644
index 00000000..fa62d30d
--- /dev/null
+++ b/storagedriver/inmemory/inmemory_test.go
@@ -0,0 +1,20 @@
+package inmemory
+
+import (
+ "testing"
+
+ "github.com/docker/docker-registry/storagedriver"
+ "github.com/docker/docker-registry/storagedriver/testsuites"
+ . "gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner.
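+// init below registers this driver with both the in-process and the IPC
+// variants of the shared storagedriver test suite.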
+func Test(t *testing.T) { TestingT(t) } + +func init() { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return NewDriver(), nil + } + testsuites.RegisterInProcessSuite(inmemoryDriverConstructor) + testsuites.RegisterIPCSuite("inmemory", nil) +} diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go new file mode 100644 index 00000000..c4e50a4d --- /dev/null +++ b/storagedriver/ipc/client.go @@ -0,0 +1,285 @@ +package ipc + +import ( + "encoding/json" + "io" + "net" + "os" + "os/exec" + "path" + "syscall" + + "github.com/docker/libchan" + "github.com/docker/libchan/spdy" +) + +type StorageDriverClient struct { + subprocess *exec.Cmd + socket *os.File + transport *spdy.Transport + sender libchan.Sender +} + +func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) { + paramsBytes, err := json.Marshal(parameters) + if err != nil { + return nil, err + } + + driverPath := os.ExpandEnv(path.Join("$GOPATH", "bin", name)) + if _, err := os.Stat(driverPath); os.IsNotExist(err) { + driverPath = path.Join(path.Dir(os.Args[0]), name) + } + if _, err := os.Stat(driverPath); os.IsNotExist(err) { + driverPath, err = exec.LookPath(name) + if err != nil { + return nil, err + } + } + + command := exec.Command(driverPath, string(paramsBytes)) + + return &StorageDriverClient{ + subprocess: command, + }, nil +} + +func (driver *StorageDriverClient) Start() error { + fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) + if err != nil { + return err + } + + childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket") + parentSocket := os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") + + driver.subprocess.Stdout = os.Stdout + driver.subprocess.Stderr = os.Stderr + driver.subprocess.ExtraFiles = []*os.File{childSocket} + + if err = driver.subprocess.Start(); err != nil { + parentSocket.Close() + return err + } + + if err = childSocket.Close(); err != nil { + parentSocket.Close() + return err + } + + connection, err := net.FileConn(parentSocket) + if err != nil { + parentSocket.Close() + return err + } + transport, err := spdy.NewClientTransport(connection) + if err != nil { + parentSocket.Close() + return err + } + sender, err := transport.NewSendChannel() + if err != nil { + transport.Close() + parentSocket.Close() + return err + } + + driver.socket = parentSocket + driver.transport = transport + driver.sender = sender + + return nil +} + +func (driver *StorageDriverClient) Stop() error { + closeSenderErr := driver.sender.Close() + closeTransportErr := driver.transport.Close() + closeSocketErr := driver.socket.Close() + killErr := driver.subprocess.Process.Kill() + + if closeSenderErr != nil { + return closeSenderErr + } else if closeTransportErr != nil { + return closeTransportErr + } else if closeSocketErr != nil { + return closeSocketErr + } + return killErr +} + +func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path} + err := driver.sender.Send(&Request{Type: "GetContent", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return nil, err + } + + var response GetContentResponse + err = receiver.Receive(&response) + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, response.Error + } + + return response.Content, nil +} + +func (driver *StorageDriverClient) PutContent(path string, contents []byte) error { + 
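+ // Every client method follows the same request/response pattern: open a
+ // libchan pipe, send the remote end inside the Request so the server can
+ // reply on it, then block on the local end for the typed response.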
receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path, "Contents": contents} + err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return err + } + + var response PutContentResponse + err = receiver.Receive(&response) + if err != nil { + panic(err) + return err + } + + if response.Error != nil { + return response.Error + } + + return nil +} + +func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.ReadCloser, error) { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path, "Offset": offset} + err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return nil, err + } + + var response ReadStreamResponse + err = receiver.Receive(&response) + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, response.Error + } + + return response.Reader, nil +} + +func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": WrapReadCloser(reader)} + err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return err + } + + var response WriteStreamResponse + err = receiver.Receive(&response) + if err != nil { + return err + } + + if response.Error != nil { + return response.Error + } + + return nil +} + +func (driver *StorageDriverClient) ResumeWritePosition(path string) (uint64, error) { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path} + err := driver.sender.Send(&Request{Type: "ResumeWritePosition", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return 0, err + } + + var response ResumeWritePositionResponse + err = receiver.Receive(&response) + if err != nil { + return 0, err + } + + if response.Error != nil { + return 0, response.Error + } + + return response.Position, nil +} + +func (driver *StorageDriverClient) List(prefix string) ([]string, error) { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Prefix": prefix} + err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return nil, err + } + + var response ListResponse + err = receiver.Receive(&response) + if err != nil { + return nil, err + } + + if response.Error != nil { + return nil, response.Error + } + + return response.Keys, nil +} + +func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath} + err := driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return err + } + + var response MoveResponse + err = receiver.Receive(&response) + if err != nil { + return err + } + + if response.Error != nil { + return response.Error + } + + return nil +} + +func (driver *StorageDriverClient) Delete(path string) error { + receiver, remoteSender := libchan.Pipe() + + params := map[string]interface{}{"Path": path} + err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender}) + if err != nil { + return err + } + + var 
response DeleteResponse + err = receiver.Receive(&response) + if err != nil { + return err + } + + if response.Error != nil { + return response.Error + } + + return nil +} diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go new file mode 100644 index 00000000..ab960b82 --- /dev/null +++ b/storagedriver/ipc/ipc.go @@ -0,0 +1,83 @@ +package ipc + +import ( + "errors" + "fmt" + "io" + "reflect" + + "github.com/docker/libchan" +) + +type Request struct { + Type string + Parameters map[string]interface{} + ResponseChannel libchan.Sender +} + +type noWriteReadWriteCloser struct { + io.ReadCloser +} + +func (r noWriteReadWriteCloser) Write(p []byte) (n int, err error) { + return 0, errors.New("Write unsupported") +} + +func WrapReadCloser(readCloser io.ReadCloser) io.ReadWriteCloser { + return noWriteReadWriteCloser{readCloser} +} + +type responseError struct { + Type string + Message string +} + +func ResponseError(err error) *responseError { + if err == nil { + return nil + } + return &responseError{ + Type: reflect.TypeOf(err).String(), + Message: err.Error(), + } +} + +func (err *responseError) Error() string { + return fmt.Sprintf("%s: %s", err.Type, err.Message) +} + +type GetContentResponse struct { + Content []byte + Error *responseError +} + +type PutContentResponse struct { + Error *responseError +} + +type ReadStreamResponse struct { + Reader io.ReadWriteCloser + Error *responseError +} + +type WriteStreamResponse struct { + Error *responseError +} + +type ResumeWritePositionResponse struct { + Position uint64 + Error *responseError +} + +type ListResponse struct { + Keys []string + Error *responseError +} + +type MoveResponse struct { + Error *responseError +} + +type DeleteResponse struct { + Error *responseError +} diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go new file mode 100644 index 00000000..2e240f42 --- /dev/null +++ b/storagedriver/ipc/server.go @@ -0,0 +1,160 @@ +package ipc + +import ( + "io" + "net" + "os" + + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/libchan" + "github.com/docker/libchan/spdy" +) + +func Server(driver storagedriver.StorageDriver) error { + childSocket := os.NewFile(3, "childSocket") + defer childSocket.Close() + conn, err := net.FileConn(childSocket) + if err != nil { + panic(err) + } + defer conn.Close() + if transport, err := spdy.NewServerTransport(conn); err != nil { + panic(err) + } else { + for { + receiver, err := transport.WaitReceiveChannel() + if err != nil { + panic(err) + } + go receive(driver, receiver) + } + return nil + } +} + +func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { + for { + var request Request + err := receiver.Receive(&request) + if err != nil { + panic(err) + } + go handleRequest(driver, request) + } +} + +func handleRequest(driver storagedriver.StorageDriver, request Request) { + + switch request.Type { + case "GetContent": + path, _ := request.Parameters["Path"].(string) + content, err := driver.GetContent(path) + response := GetContentResponse{ + Content: content, + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "PutContent": + path, _ := request.Parameters["Path"].(string) + contents, _ := request.Parameters["Contents"].([]byte) + err := driver.PutContent(path, contents) + response := PutContentResponse{ + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "ReadStream": + var offset 
uint64 + + path, _ := request.Parameters["Path"].(string) + offset, ok := request.Parameters["Offset"].(uint64) + if !ok { + offsetSigned, _ := request.Parameters["Offset"].(int64) + offset = uint64(offsetSigned) + } + reader, err := driver.ReadStream(path, offset) + var response ReadStreamResponse + if err != nil { + response = ReadStreamResponse{Error: ResponseError(err)} + } else { + response = ReadStreamResponse{Reader: WrapReadCloser(reader)} + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "WriteStream": + var offset uint64 + + path, _ := request.Parameters["Path"].(string) + offset, ok := request.Parameters["Offset"].(uint64) + if !ok { + offsetSigned, _ := request.Parameters["Offset"].(int64) + offset = uint64(offsetSigned) + } + size, ok := request.Parameters["Size"].(uint64) + if !ok { + sizeSigned, _ := request.Parameters["Size"].(int64) + size = uint64(sizeSigned) + } + reader, _ := request.Parameters["Reader"].(io.ReadCloser) + err := driver.WriteStream(path, offset, size, reader) + response := WriteStreamResponse{ + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "ResumeWritePosition": + path, _ := request.Parameters["Path"].(string) + position, err := driver.ResumeWritePosition(path) + response := ResumeWritePositionResponse{ + Position: position, + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "List": + prefix, _ := request.Parameters["Prefix"].(string) + keys, err := driver.List(prefix) + response := ListResponse{ + Keys: keys, + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "Move": + sourcePath, _ := request.Parameters["SourcePath"].(string) + destPath, _ := request.Parameters["DestPath"].(string) + err := driver.Move(sourcePath, destPath) + response := MoveResponse{ + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + case "Delete": + path, _ := request.Parameters["Path"].(string) + err := driver.Delete(path) + response := DeleteResponse{ + Error: ResponseError(err), + } + err = request.ResponseChannel.Send(&response) + if err != nil { + panic(err) + } + default: + panic(request) + } +} diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go new file mode 100644 index 00000000..bfbfc110 --- /dev/null +++ b/storagedriver/storagedriver.go @@ -0,0 +1,34 @@ +package storagedriver + +import ( + "fmt" + "io" +) + +type StorageDriver interface { + GetContent(path string) ([]byte, error) + PutContent(path string, content []byte) error + ReadStream(path string, offset uint64) (io.ReadCloser, error) + WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error + ResumeWritePosition(path string) (uint64, error) + List(prefix string) ([]string, error) + Move(sourcePath string, destPath string) error + Delete(path string) error +} + +type PathNotFoundError struct { + Path string +} + +func (err PathNotFoundError) Error() string { + return fmt.Sprintf("Path not found: %s", err.Path) +} + +type InvalidOffsetError struct { + Path string + Offset uint64 +} + +func (err InvalidOffsetError) Error() string { + return fmt.Sprintf("Invalid offset: %d for path: %s", err.Offset, err.Path) +} diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go new file mode 100644 index 00000000..7ca196d6 
--- /dev/null +++ b/storagedriver/testsuites/testsuites.go @@ -0,0 +1,353 @@ +package testsuites + +import ( + "bytes" + "io/ioutil" + "math/rand" + "path" + "sort" + "testing" + + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/ipc" + + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner +func Test(t *testing.T) { TestingT(t) } + +func RegisterInProcessSuite(driverConstructor DriverConstructor) { + Suite(&DriverSuite{ + Constructor: driverConstructor, + }) +} + +func RegisterIPCSuite(driverName string, ipcParams map[string]string) { + suite := &DriverSuite{ + Constructor: func() (storagedriver.StorageDriver, error) { + d, err := ipc.NewDriverClient(driverName, ipcParams) + if err != nil { + return nil, err + } + err = d.Start() + if err != nil { + return nil, err + } + return d, nil + }, + } + suite.Teardown = func() error { + driverClient := suite.StorageDriver.(*ipc.StorageDriverClient) + return driverClient.Stop() + } + Suite(suite) +} + +type DriverConstructor func() (storagedriver.StorageDriver, error) +type DriverTeardown func() error + +type DriverSuite struct { + Constructor DriverConstructor + Teardown DriverTeardown + storagedriver.StorageDriver +} + +type TestDriverConfig struct { + name string + params map[string]string +} + +func (suite *DriverSuite) SetUpSuite(c *C) { + d, err := suite.Constructor() + c.Assert(err, IsNil) + suite.StorageDriver = d +} + +func (suite *DriverSuite) TearDownSuite(c *C) { + if suite.Teardown != nil { + err := suite.Teardown() + c.Assert(err, IsNil) + } +} + +func (suite *DriverSuite) TestWriteRead1(c *C) { + filename := randomString(32) + contents := []byte("a") + suite.writeReadCompare(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestWriteRead2(c *C) { + filename := randomString(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompare(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestWriteRead3(c *C) { + filename := randomString(32) + contents := []byte(randomString(32)) + suite.writeReadCompare(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestWriteRead4(c *C) { + filename := randomString(32) + contents := []byte(randomString(1024 * 1024)) + suite.writeReadCompare(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestReadNonexistent(c *C) { + filename := randomString(32) + _, err := suite.StorageDriver.GetContent(filename) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) TestWriteReadStreams1(c *C) { + filename := randomString(32) + contents := []byte("a") + suite.writeReadCompareStreams(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestWriteReadStreams2(c *C) { + filename := randomString(32) + contents := []byte("\xc3\x9f") + suite.writeReadCompareStreams(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestWriteReadStreams3(c *C) { + filename := randomString(32) + contents := []byte(randomString(32)) + suite.writeReadCompareStreams(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestWriteReadStreams4(c *C) { + filename := randomString(32) + contents := []byte(randomString(1024 * 1024)) + suite.writeReadCompareStreams(c, filename, contents, contents) +} + +func (suite *DriverSuite) TestContinueStreamAppend(c *C) { + filename := randomString(32) + + chunkSize := uint64(32) + + contentsChunk1 := []byte(randomString(chunkSize)) + contentsChunk2 := []byte(randomString(chunkSize)) + contentsChunk3 := []byte(randomString(chunkSize)) + + 
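+ // Write the first chunk, then resume twice from wherever
+ // ResumeWritePosition reports; the reassembled file must equal the
+ // three chunks concatenated in order.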
fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) + + err := suite.StorageDriver.WriteStream(filename, 0, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(contentsChunk1))) + c.Assert(err, IsNil) + + offset, err := suite.StorageDriver.ResumeWritePosition(filename) + c.Assert(err, IsNil) + if offset > chunkSize { + c.Fatalf("Offset too large, %d > %d", offset, chunkSize) + } + err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize]))) + c.Assert(err, IsNil) + + offset, err = suite.StorageDriver.ResumeWritePosition(filename) + c.Assert(err, IsNil) + if offset > 2*chunkSize { + c.Fatalf("Offset too large, %d > %d", offset, 2*chunkSize) + } + + err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:]))) + c.Assert(err, IsNil) + + received, err := suite.StorageDriver.GetContent(filename) + c.Assert(err, IsNil) + c.Assert(received, DeepEquals, fullContents) + + offset, err = suite.StorageDriver.ResumeWritePosition(filename) + c.Assert(err, IsNil) + c.Assert(offset, Equals, uint64(3*chunkSize)) +} + +func (suite *DriverSuite) TestReadStreamWithOffset(c *C) { + filename := randomString(32) + + chunkSize := uint64(32) + + contentsChunk1 := []byte(randomString(chunkSize)) + contentsChunk2 := []byte(randomString(chunkSize)) + contentsChunk3 := []byte(randomString(chunkSize)) + + err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + c.Assert(err, IsNil) + + reader, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, IsNil) + + c.Assert(readContents, DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + + reader, err = suite.StorageDriver.ReadStream(filename, chunkSize) + c.Assert(err, IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, IsNil) + + c.Assert(readContents, DeepEquals, append(contentsChunk2, contentsChunk3...)) + + reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*2) + c.Assert(err, IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, IsNil) + + c.Assert(readContents, DeepEquals, contentsChunk3) + + reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3) + c.Assert(err, IsNil) + defer reader.Close() + + readContents, err = ioutil.ReadAll(reader) + c.Assert(err, IsNil) + + c.Assert(readContents, DeepEquals, []byte{}) +} + +func (suite *DriverSuite) TestReadNonexistentStream(c *C) { + filename := randomString(32) + _, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) TestList(c *C) { + rootDirectory := randomString(uint64(8 + rand.Intn(8))) + parentDirectory := rootDirectory + "/" + randomString(uint64(8+rand.Intn(8))) + childFiles := make([]string, 50) + for i := 0; i < len(childFiles); i++ { + childFile := parentDirectory + "/" + randomString(uint64(8+rand.Intn(8))) + childFiles[i] = childFile + err := suite.StorageDriver.PutContent(childFile, []byte(randomString(32))) + c.Assert(err, IsNil) + } + sort.Strings(childFiles) + + keys, err := suite.StorageDriver.List(rootDirectory) + c.Assert(err, IsNil) + c.Assert(keys, DeepEquals, []string{parentDirectory}) + + keys, err = suite.StorageDriver.List(parentDirectory) + c.Assert(err, IsNil) + + 
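+ // List makes no ordering guarantee, so sort the returned keys before
+ // the deep comparison against the pre-sorted childFiles.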
sort.Strings(keys) + c.Assert(keys, DeepEquals, childFiles) +} + +func (suite *DriverSuite) TestMove(c *C) { + contents := []byte(randomString(32)) + sourcePath := randomString(32) + destPath := randomString(32) + + err := suite.StorageDriver.PutContent(sourcePath, contents) + c.Assert(err, IsNil) + + err = suite.StorageDriver.Move(sourcePath, destPath) + c.Assert(err, IsNil) + + received, err := suite.StorageDriver.GetContent(destPath) + c.Assert(err, IsNil) + c.Assert(received, DeepEquals, contents) + + _, err = suite.StorageDriver.GetContent(sourcePath) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) TestMoveNonexistent(c *C) { + sourcePath := randomString(32) + destPath := randomString(32) + + err := suite.StorageDriver.Move(sourcePath, destPath) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) TestRemove(c *C) { + filename := randomString(32) + contents := []byte(randomString(32)) + + err := suite.StorageDriver.PutContent(filename, contents) + c.Assert(err, IsNil) + + err = suite.StorageDriver.Delete(filename) + c.Assert(err, IsNil) + + _, err = suite.StorageDriver.GetContent(filename) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) TestRemoveNonexistent(c *C) { + filename := randomString(32) + err := suite.StorageDriver.Delete(filename) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) TestRemoveFolder(c *C) { + dirname := randomString(32) + filename1 := randomString(32) + filename2 := randomString(32) + contents := []byte(randomString(32)) + + err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents) + c.Assert(err, IsNil) + + err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents) + c.Assert(err, IsNil) + + err = suite.StorageDriver.Delete(dirname) + c.Assert(err, IsNil) + + _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1)) + c.Assert(err, NotNil) + + _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2)) + c.Assert(err, NotNil) +} + +func (suite *DriverSuite) writeReadCompare(c *C, filename string, contents, expected []byte) { + err := suite.StorageDriver.PutContent(filename, contents) + c.Assert(err, IsNil) + + readContents, err := suite.StorageDriver.GetContent(filename) + c.Assert(err, IsNil) + + c.Assert(readContents, DeepEquals, contents) + + err = suite.StorageDriver.Delete(filename) + c.Assert(err, IsNil) +} + +func (suite *DriverSuite) writeReadCompareStreams(c *C, filename string, contents, expected []byte) { + err := suite.StorageDriver.WriteStream(filename, 0, uint64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents))) + c.Assert(err, IsNil) + + reader, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, IsNil) + + c.Assert(readContents, DeepEquals, contents) + + err = suite.StorageDriver.Delete(filename) + c.Assert(err, IsNil) +} + +var pathChars = []byte("abcdefghijklmnopqrstuvwxyz") + +func randomString(length uint64) string { + b := make([]byte, length) + for i := range b { + b[i] = pathChars[rand.Intn(len(pathChars))] + } + return string(b) +} From 47ca8be42f47edbaa50644a717ddb2932b20ef61 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Fri, 24 Oct 2014 16:36:17 -0700 Subject: [PATCH 003/165] Slight additions/modifications to the test suite --- storagedriver/testsuites/testsuites.go | 37 ++++++++++++-------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/storagedriver/testsuites/testsuites.go 
b/storagedriver/testsuites/testsuites.go index 7ca196d6..dae5cc08 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -127,8 +127,9 @@ func (suite *DriverSuite) TestWriteReadStreams4(c *C) { func (suite *DriverSuite) TestContinueStreamAppend(c *C) { filename := randomString(32) + defer suite.StorageDriver.Delete(filename) - chunkSize := uint64(32) + chunkSize := uint64(5 * 1024 * 1024) contentsChunk1 := []byte(randomString(chunkSize)) contentsChunk2 := []byte(randomString(chunkSize)) @@ -159,14 +160,11 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) { received, err := suite.StorageDriver.GetContent(filename) c.Assert(err, IsNil) c.Assert(received, DeepEquals, fullContents) - - offset, err = suite.StorageDriver.ResumeWritePosition(filename) - c.Assert(err, IsNil) - c.Assert(offset, Equals, uint64(3*chunkSize)) } func (suite *DriverSuite) TestReadStreamWithOffset(c *C) { filename := randomString(32) + defer suite.StorageDriver.Delete(filename) chunkSize := uint64(32) @@ -203,15 +201,6 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *C) { c.Assert(err, IsNil) c.Assert(readContents, DeepEquals, contentsChunk3) - - reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3) - c.Assert(err, IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, IsNil) - - c.Assert(readContents, DeepEquals, []byte{}) } func (suite *DriverSuite) TestReadNonexistentStream(c *C) { @@ -222,6 +211,8 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *C) { func (suite *DriverSuite) TestList(c *C) { rootDirectory := randomString(uint64(8 + rand.Intn(8))) + defer suite.StorageDriver.Delete(rootDirectory) + parentDirectory := rootDirectory + "/" + randomString(uint64(8+rand.Intn(8))) childFiles := make([]string, 50) for i := 0; i < len(childFiles); i++ { @@ -248,6 +239,9 @@ func (suite *DriverSuite) TestMove(c *C) { sourcePath := randomString(32) destPath := randomString(32) + defer suite.StorageDriver.Delete(sourcePath) + defer suite.StorageDriver.Delete(destPath) + err := suite.StorageDriver.PutContent(sourcePath, contents) c.Assert(err, IsNil) @@ -274,6 +268,8 @@ func (suite *DriverSuite) TestRemove(c *C) { filename := randomString(32) contents := []byte(randomString(32)) + defer suite.StorageDriver.Delete(filename) + err := suite.StorageDriver.PutContent(filename, contents) c.Assert(err, IsNil) @@ -296,6 +292,9 @@ func (suite *DriverSuite) TestRemoveFolder(c *C) { filename2 := randomString(32) contents := []byte(randomString(32)) + defer suite.StorageDriver.Delete(path.Join(dirname, filename1)) + defer suite.StorageDriver.Delete(path.Join(dirname, filename2)) + err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents) c.Assert(err, IsNil) @@ -313,6 +312,8 @@ func (suite *DriverSuite) TestRemoveFolder(c *C) { } func (suite *DriverSuite) writeReadCompare(c *C, filename string, contents, expected []byte) { + defer suite.StorageDriver.Delete(filename) + err := suite.StorageDriver.PutContent(filename, contents) c.Assert(err, IsNil) @@ -320,12 +321,11 @@ func (suite *DriverSuite) writeReadCompare(c *C, filename string, contents, expe c.Assert(err, IsNil) c.Assert(readContents, DeepEquals, contents) - - err = suite.StorageDriver.Delete(filename) - c.Assert(err, IsNil) } func (suite *DriverSuite) writeReadCompareStreams(c *C, filename string, contents, expected []byte) { + defer suite.StorageDriver.Delete(filename) + err := suite.StorageDriver.WriteStream(filename, 0, 
uint64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents))) c.Assert(err, IsNil) @@ -337,9 +337,6 @@ func (suite *DriverSuite) writeReadCompareStreams(c *C, filename string, content c.Assert(err, IsNil) c.Assert(readContents, DeepEquals, contents) - - err = suite.StorageDriver.Delete(filename) - c.Assert(err, IsNil) } var pathChars = []byte("abcdefghijklmnopqrstuvwxyz") From 134287336765f0df516415d74cf7e91bcf7e81b6 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Fri, 24 Oct 2014 16:37:25 -0700 Subject: [PATCH 004/165] Add s3 driver for the new Storage Layer API --- main/storagedriver/s3/s3.go | 57 ++++++++ storagedriver/s3/s3.go | 257 ++++++++++++++++++++++++++++++++++++ storagedriver/s3/s3_test.go | 29 ++++ 3 files changed, 343 insertions(+) create mode 100644 main/storagedriver/s3/s3.go create mode 100644 storagedriver/s3/s3.go create mode 100644 storagedriver/s3/s3_test.go diff --git a/main/storagedriver/s3/s3.go b/main/storagedriver/s3/s3.go new file mode 100644 index 00000000..0fbc376c --- /dev/null +++ b/main/storagedriver/s3/s3.go @@ -0,0 +1,57 @@ +package main + +import ( + "encoding/json" + "os" + "strconv" + + "github.com/crowdmob/goamz/aws" + "github.com/docker/docker-registry/storagedriver/ipc" + "github.com/docker/docker-registry/storagedriver/s3" +) + +func main() { + parametersBytes := []byte(os.Args[1]) + var parameters map[string]interface{} + err := json.Unmarshal(parametersBytes, ¶meters) + if err != nil { + panic(err) + } + + accessKey, ok := parameters["accessKey"].(string) + if !ok || accessKey == "" { + panic("No accessKey parameter") + } + + secretKey, ok := parameters["secretKey"].(string) + if !ok || secretKey == "" { + panic("No secretKey parameter") + } + + region, ok := parameters["region"].(string) + if !ok || region == "" { + panic("No region parameter") + } + + bucket, ok := parameters["bucket"].(string) + if !ok || bucket == "" { + panic("No bucket parameter") + } + + encrypt, ok := parameters["encrypt"].(string) + if !ok { + panic("No encrypt parameter") + } + + encryptBool, err := strconv.ParseBool(encrypt) + if err != nil { + panic(err) + } + + driver, err := s3.NewDriver(accessKey, secretKey, aws.GetRegion(region), encryptBool, bucket) + if err != nil { + panic(err) + } + + ipc.Server(driver) +} diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go new file mode 100644 index 00000000..26561000 --- /dev/null +++ b/storagedriver/s3/s3.go @@ -0,0 +1,257 @@ +package s3 + +import ( + "bytes" + "io" + "net/http" + "strconv" + + "github.com/crowdmob/goamz/aws" + "github.com/crowdmob/goamz/s3" + "github.com/docker/docker-registry/storagedriver" +) + +/* Chunks need to be at least 5MB to store with a multipart upload on S3 */ +const minChunkSize = uint64(5 * 1024 * 1024) + +/* The largest amount of parts you can request from S3 */ +const listPartsMax = 1000 + +type S3Driver struct { + S3 *s3.S3 + Bucket *s3.Bucket + Encrypt bool +} + +func NewDriver(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*S3Driver, error) { + auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey} + s3obj := s3.New(auth, region) + bucket := s3obj.Bucket(bucketName) + + if err := bucket.PutBucket(s3.PublicRead); err != nil { + s3Err, ok := err.(*s3.Error) + if !(ok && s3Err.Code == "BucketAlreadyOwnedByYou") { + return nil, err + } + } + + return &S3Driver{s3obj, bucket, encrypt}, nil +} + +func (d *S3Driver) GetContent(path string) ([]byte, error) { + return d.Bucket.Get(path) +} + +func (d *S3Driver) PutContent(path 
string, contents []byte) error { + return d.Bucket.Put(path, contents, d.getContentType(), d.getPermissions(), d.getOptions()) +} + +func (d *S3Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatUint(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(path, headers) + if resp != nil { + return resp.Body, err + } + + return nil, err +} + +func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { + defer reader.Close() + + chunkSize := minChunkSize + for size/chunkSize >= listPartsMax { + chunkSize *= 2 + } + + partNumber := 1 + totalRead := uint64(0) + multi, parts, err := d.getAllParts(path) + if err != nil { + return err + } + + if (offset) > uint64(len(parts))*chunkSize || (offset < size && offset%chunkSize != 0) { + return storagedriver.InvalidOffsetError{path, offset} + } + + if len(parts) > 0 { + partNumber = int(offset/chunkSize) + 1 + totalRead = offset + parts = parts[0 : partNumber-1] + } + + buf := make([]byte, chunkSize) + for { + bytesRead, err := io.ReadFull(reader, buf) + totalRead += uint64(bytesRead) + + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return err + } else if (uint64(bytesRead) < chunkSize) && totalRead != size { + break + } else { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:bytesRead])) + if err != nil { + + return err + } + + parts = append(parts, part) + if totalRead == size { + multi.Complete(parts) + break + } + + partNumber++ + } + } + + return nil +} + +func (d *S3Driver) ResumeWritePosition(path string) (uint64, error) { + _, parts, err := d.getAllParts(path) + if err != nil { + return 0, err + } + + if len(parts) == 0 { + return 0, nil + } + + return (((uint64(len(parts)) - 1) * uint64(parts[0].Size)) + uint64(parts[len(parts)-1].Size)), nil +} + +func (d *S3Driver) List(prefix string) ([]string, error) { + listResponse, err := d.Bucket.List(prefix+"/", "/", "", listPartsMax) + if err != nil { + return nil, err + } + + files := []string{} + directories := []string{} + + for len(listResponse.Contents) > 0 || len(listResponse.CommonPrefixes) > 0 { + for _, key := range listResponse.Contents { + files = append(files, key.Key) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, commonPrefix[0:len(commonPrefix)-1]) + } + + lastFile := "" + lastDirectory := "" + lastMarker := "" + + if len(files) > 0 { + lastFile = files[len(files)-1] + } + + if len(directories) > 0 { + lastDirectory = directories[len(directories)-1] + "/" + } + + if lastDirectory > lastFile { + lastMarker = lastDirectory + } else { + lastMarker = lastFile + } + + listResponse, err = d.Bucket.List(prefix+"/", "/", lastMarker, listPartsMax) + if err != nil { + return nil, err + } + } + + return append(files, directories...), nil +} + +func (d *S3Driver) Move(sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. 
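   S3 exposes no rename primitive, so Move is emulated server-side:
   PutCopy duplicates the object at destPath, then Delete removes the
   source.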
*/
+	_, err := d.Bucket.PutCopy(destPath, d.getPermissions(), s3.CopyOptions{d.getOptions(), "", d.getContentType()}, d.Bucket.Name+"/"+sourcePath)
+	if err != nil {
+		return err
+	}
+
+	return d.Delete(sourcePath)
+}
+
+func (d *S3Driver) Delete(path string) error {
+	listResponse, err := d.Bucket.List(path, "", "", listPartsMax)
+	if err != nil || len(listResponse.Contents) == 0 {
+		return storagedriver.PathNotFoundError{path}
+	}
+
+	s3Objects := make([]s3.Object, listPartsMax)
+
+	for len(listResponse.Contents) > 0 {
+		for index, key := range listResponse.Contents {
+			s3Objects[index].Key = key.Key
+		}
+
+		err := d.Bucket.DelMulti(s3.Delete{false, s3Objects[0:len(listResponse.Contents)]})
+		if err != nil {
+			return err
+		}
+
+		listResponse, err = d.Bucket.List(path, "", "", listPartsMax)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *S3Driver) getHighestIdMulti(path string) (multi *s3.Multi, err error) {
+	multis, _, err := d.Bucket.ListMulti(path, "")
+	if err != nil && !hasCode(err, "NoSuchUpload") {
+		return nil, err
+	}
+
+	uploadId := ""
+
+	if len(multis) > 0 {
+		for _, m := range multis {
+			if m.Key == path && m.UploadId >= uploadId {
+				uploadId = m.UploadId
+				multi = m
+			}
+		}
+		return multi, nil
+	} else {
+		multi, err := d.Bucket.InitMulti(path, d.getContentType(), d.getPermissions(), d.getOptions())
+		return multi, err
+	}
+}
+
+func (d *S3Driver) getAllParts(path string) (*s3.Multi, []s3.Part, error) {
+	multi, err := d.getHighestIdMulti(path)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	parts, err := multi.ListParts()
+	return multi, parts, err
+}
+
+func hasCode(err error, code string) bool {
+	s3err, ok := err.(*aws.Error)
+	return ok && s3err.Code == code
+}
+
+func (d *S3Driver) getOptions() s3.Options {
+	return s3.Options{SSE: d.Encrypt}
+}
+
+func (d *S3Driver) getPermissions() s3.ACL {
+	return s3.Private
+}
+
+func (d *S3Driver) getContentType() string {
+	return "application/octet-stream"
+}
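
Delete above pages through List in batches of listPartsMax, issuing one DelMulti call per page until the listing comes back empty. A minimal standalone sketch of that pagination loop — list and del here are illustrative stand-ins for Bucket.List and Bucket.DelMulti, not part of the driver's API:

    package s3sketch

    // deleteAll repeatedly lists up to max keys under prefix and deletes
    // each page, stopping once a listing returns no keys. Like Delete
    // above, it relies on deleted keys disappearing from later listings.
    func deleteAll(prefix string, max int,
        list func(prefix string, max int) ([]string, error),
        del func(keys []string) error,
    ) error {
        for {
            keys, err := list(prefix, max)
            if err != nil {
                return err
            }
            if len(keys) == 0 {
                return nil
            }
            if err := del(keys); err != nil {
                return err
            }
        }
    }
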
diff --git a/storagedriver/s3/s3_test.go b/storagedriver/s3/s3_test.go
new file mode 100644
index 00000000..400ec7ad
--- /dev/null
+++ b/storagedriver/s3/s3_test.go
@@ -0,0 +1,29 @@
+package s3
+
+import (
+	"os"
+	"testing"
+
+	"github.com/crowdmob/goamz/aws"
+	"github.com/docker/docker-registry/storagedriver"
+	"github.com/docker/docker-registry/storagedriver/testsuites"
+	. "gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { TestingT(t) }
+
+func init() {
+	accessKey := os.Getenv("ACCESS_KEY")
+	secretKey := os.Getenv("SECRET_KEY")
+	region := os.Getenv("AWS_REGION")
+	bucket := os.Getenv("S3_BUCKET")
+	encrypt := os.Getenv("S3_ENCRYPT")
+
+	s3DriverConstructor := func() (storagedriver.StorageDriver, error) {
+		return NewDriver(accessKey, secretKey, aws.GetRegion(region), true, bucket)
+	}
+
+	testsuites.RegisterInProcessSuite(s3DriverConstructor)
+	testsuites.RegisterIPCSuite("s3", map[string]string{"accessKey": accessKey, "secretKey": secretKey, "region": region, "bucket": bucket, "encrypt": encrypt})
+}

From 7c892deb1c12c8587b24a2f57cad8f56f9a0817d Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Fri, 24 Oct 2014 18:33:23 -0700
Subject: [PATCH 005/165] Uses streams internally for ipc Get/Put Content

This is done because libchan/spdystream does not currently support
sending serialized objects of size larger than 16MB
See https://github.com/docker/libchan/issues/65
---
 storagedriver/ipc/client.go            | 18 ++++++++++++------
 storagedriver/ipc/ipc.go               | 20 +++++++++-----------
 storagedriver/ipc/server.go            | 23 +++++++++++++++--------
 storagedriver/testsuites/testsuites.go |  2 +-
 4 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go
index c4e50a4d..0025d2bc 100644
--- a/storagedriver/ipc/client.go
+++ b/storagedriver/ipc/client.go
@@ -1,8 +1,10 @@
 package ipc
 
 import (
+	"bytes"
 	"encoding/json"
 	"io"
+	"io/ioutil"
 	"net"
 	"os"
 	"os/exec"
@@ -116,7 +118,7 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) {
 		return nil, err
 	}
 
-	var response GetContentResponse
+	var response ReadStreamResponse
 	err = receiver.Receive(&response)
 	if err != nil {
 		return nil, err
@@ -126,22 +128,26 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) {
 		return nil, response.Error
 	}
 
-	return response.Content, nil
+	defer response.Reader.Close()
+	contents, err := ioutil.ReadAll(response.Reader)
+	if err != nil {
+		return nil, err
+	}
+	return contents, nil
 }
 
 func (driver *StorageDriverClient) PutContent(path string, contents []byte) error {
 	receiver, remoteSender := libchan.Pipe()
 
-	params := map[string]interface{}{"Path": path, "Contents": contents}
+	params := map[string]interface{}{"Path": path, "Reader": WrapReader(bytes.NewReader(contents))}
 	err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender})
 	if err != nil {
 		return err
 	}
 
-	var response PutContentResponse
+	var response WriteStreamResponse
 	err = receiver.Receive(&response)
 	if err != nil {
-		panic(err)
 		return err
 	}
 
@@ -177,7 +183,7 @@ func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.Re
 func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
 	receiver, remoteSender := libchan.Pipe()
 
-	params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": WrapReadCloser(reader)}
+	params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": WrapReader(reader)}
 	err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender})
 	if err != nil {
 		return err
diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go
index ab960b82..89b0cf20 100644
--- a/storagedriver/ipc/ipc.go
+++ b/storagedriver/ipc/ipc.go
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"reflect"
 
 	"github.com/docker/libchan"
 )
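
The pattern this patch adopts is easiest to see in isolation: rather than embedding a large []byte in a serialized message, the sender wraps it in a stream and the receiver drains the stream back into memory. A rough sketch under that reading — the function names are illustrative, not part of the ipc package:

    package ipcsketch

    import (
        "bytes"
        "io"
        "io/ioutil"
    )

    // sendAsStream wraps an in-memory payload as a stream, as PutContent now
    // does before sending, so no single serialized message carries the payload.
    func sendAsStream(contents []byte) io.ReadCloser {
        return ioutil.NopCloser(bytes.NewReader(contents))
    }

    // receiveFromStream drains the stream back into memory, as GetContent
    // now does with response.Reader.
    func receiveFromStream(r io.ReadCloser) ([]byte, error) {
        defer r.Close()
        return ioutil.ReadAll(r)
    }

@@ -23,8 +24,14 @@ func (r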
noWriteReadWriteCloser) Write(p []byte) (n int, err error) { return 0, errors.New("Write unsupported") } -func WrapReadCloser(readCloser io.ReadCloser) io.ReadWriteCloser { - return noWriteReadWriteCloser{readCloser} +func WrapReader(reader io.Reader) io.ReadWriteCloser { + if readWriteCloser, ok := reader.(io.ReadWriteCloser); ok { + return readWriteCloser + } else if readCloser, ok := reader.(io.ReadCloser); ok { + return noWriteReadWriteCloser{readCloser} + } else { + return noWriteReadWriteCloser{ioutil.NopCloser(reader)} + } } type responseError struct { @@ -46,15 +53,6 @@ func (err *responseError) Error() string { return fmt.Sprintf("%s: %s", err.Type, err.Message) } -type GetContentResponse struct { - Content []byte - Error *responseError -} - -type PutContentResponse struct { - Error *responseError -} - type ReadStreamResponse struct { Reader io.ReadWriteCloser Error *responseError diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 2e240f42..0d39a31b 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -1,7 +1,9 @@ package ipc import ( + "bytes" "io" + "io/ioutil" "net" "os" @@ -44,14 +46,15 @@ func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { } func handleRequest(driver storagedriver.StorageDriver, request Request) { - switch request.Type { case "GetContent": path, _ := request.Parameters["Path"].(string) content, err := driver.GetContent(path) - response := GetContentResponse{ - Content: content, - Error: ResponseError(err), + var response ReadStreamResponse + if err != nil { + response = ReadStreamResponse{Error: ResponseError(err)} + } else { + response = ReadStreamResponse{Reader: WrapReader(bytes.NewReader(content))} } err = request.ResponseChannel.Send(&response) if err != nil { @@ -59,9 +62,13 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { } case "PutContent": path, _ := request.Parameters["Path"].(string) - contents, _ := request.Parameters["Contents"].([]byte) - err := driver.PutContent(path, contents) - response := PutContentResponse{ + reader, _ := request.Parameters["Reader"].(io.ReadCloser) + contents, err := ioutil.ReadAll(reader) + defer reader.Close() + if err == nil { + err = driver.PutContent(path, contents) + } + response := WriteStreamResponse{ Error: ResponseError(err), } err = request.ResponseChannel.Send(&response) @@ -82,7 +89,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { if err != nil { response = ReadStreamResponse{Error: ResponseError(err)} } else { - response = ReadStreamResponse{Reader: WrapReadCloser(reader)} + response = ReadStreamResponse{Reader: WrapReader(reader)} } err = request.ResponseChannel.Send(&response) if err != nil { diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 7ca196d6..d9d3dead 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -128,7 +128,7 @@ func (suite *DriverSuite) TestWriteReadStreams4(c *C) { func (suite *DriverSuite) TestContinueStreamAppend(c *C) { filename := randomString(32) - chunkSize := uint64(32) + chunkSize := uint64(5 * 1024 * 1024) contentsChunk1 := []byte(randomString(chunkSize)) contentsChunk2 := []byte(randomString(chunkSize)) From e3a5955cd27f011e2fd1777336d45426323f4e91 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Sun, 26 Oct 2014 10:00:53 -0700 Subject: [PATCH 006/165] Unify permissions settings --- storagedriver/s3/s3.go | 10 +++++----- 1 file changed, 5 
insertions(+), 5 deletions(-) diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go index 26561000..a73e5e3d 100644 --- a/storagedriver/s3/s3.go +++ b/storagedriver/s3/s3.go @@ -28,7 +28,7 @@ func NewDriver(accessKey string, secretKey string, region aws.Region, encrypt bo s3obj := s3.New(auth, region) bucket := s3obj.Bucket(bucketName) - if err := bucket.PutBucket(s3.PublicRead); err != nil { + if err := bucket.PutBucket(getPermissions()); err != nil { s3Err, ok := err.(*s3.Error) if !(ok && s3Err.Code == "BucketAlreadyOwnedByYou") { return nil, err @@ -43,7 +43,7 @@ func (d *S3Driver) GetContent(path string) ([]byte, error) { } func (d *S3Driver) PutContent(path string, contents []byte) error { - return d.Bucket.Put(path, contents, d.getContentType(), d.getPermissions(), d.getOptions()) + return d.Bucket.Put(path, contents, d.getContentType(), getPermissions(), d.getOptions()) } func (d *S3Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { @@ -172,7 +172,7 @@ func (d *S3Driver) List(prefix string) ([]string, error) { func (d *S3Driver) Move(sourcePath string, destPath string) error { /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(destPath, d.getPermissions(), s3.CopyOptions{d.getOptions(), "", d.getContentType()}, d.Bucket.Name+"/"+sourcePath) + _, err := d.Bucket.PutCopy(destPath, getPermissions(), s3.CopyOptions{d.getOptions(), "", d.getContentType()}, d.Bucket.Name+"/"+sourcePath) if err != nil { return err } @@ -224,7 +224,7 @@ func (d *S3Driver) getHighestIdMulti(path string) (multi *s3.Multi, err error) { } return multi, nil } else { - multi, err := d.Bucket.InitMulti(path, d.getContentType(), d.getPermissions(), d.getOptions()) + multi, err := d.Bucket.InitMulti(path, d.getContentType(), getPermissions(), d.getOptions()) return multi, err } } @@ -248,7 +248,7 @@ func (d *S3Driver) getOptions() s3.Options { return s3.Options{SSE: d.Encrypt} } -func (d *S3Driver) getPermissions() s3.ACL { +func getPermissions() s3.ACL { return s3.Private } From ff81f3a71995658464a8a02407b1fbba9224ed91 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 27 Oct 2014 13:24:07 -0700 Subject: [PATCH 007/165] Adds conditional SkipCheck for storage driver tests --- storagedriver/filesystem/filesystem_test.go | 4 ++-- storagedriver/inmemory/inmemory_test.go | 4 ++-- storagedriver/s3/s3_test.go | 11 +++++++++-- storagedriver/testsuites/testsuites.go | 20 +++++++++++++++++--- 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/storagedriver/filesystem/filesystem_test.go b/storagedriver/filesystem/filesystem_test.go index c445e178..15ef9663 100644 --- a/storagedriver/filesystem/filesystem_test.go +++ b/storagedriver/filesystem/filesystem_test.go @@ -19,6 +19,6 @@ func init() { filesystemDriverConstructor := func() (storagedriver.StorageDriver, error) { return NewDriver(rootDirectory), nil } - testsuites.RegisterInProcessSuite(filesystemDriverConstructor) - testsuites.RegisterIPCSuite("filesystem", map[string]string{"RootDirectory": rootDirectory}) + testsuites.RegisterInProcessSuite(filesystemDriverConstructor, testsuites.NeverSkip) + testsuites.RegisterIPCSuite("filesystem", map[string]string{"RootDirectory": rootDirectory}, testsuites.NeverSkip) } diff --git a/storagedriver/inmemory/inmemory_test.go b/storagedriver/inmemory/inmemory_test.go index fa62d30d..accbb5f8 100644 --- a/storagedriver/inmemory/inmemory_test.go +++ b/storagedriver/inmemory/inmemory_test.go @@ -15,6 +15,6 @@ func init() { inmemoryDriverConstructor := 
func() (storagedriver.StorageDriver, error) { return NewDriver(), nil } - testsuites.RegisterInProcessSuite(inmemoryDriverConstructor) - testsuites.RegisterIPCSuite("inmemory", nil) + testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip) + testsuites.RegisterIPCSuite("inmemory", nil, testsuites.NeverSkip) } diff --git a/storagedriver/s3/s3_test.go b/storagedriver/s3/s3_test.go index 400ec7ad..b6862ab9 100644 --- a/storagedriver/s3/s3_test.go +++ b/storagedriver/s3/s3_test.go @@ -24,6 +24,13 @@ func init() { return NewDriver(accessKey, secretKey, aws.GetRegion(region), true, bucket) } - testsuites.RegisterInProcessSuite(s3DriverConstructor) - testsuites.RegisterIPCSuite("s3", map[string]string{"accessKey": accessKey, "secretKey": secretKey, "region": region, "bucket": bucket, "encrypt": encrypt}) + skipCheck := func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ACCESS_KEY, SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterInProcessSuite(s3DriverConstructor, skipCheck) + testsuites.RegisterIPCSuite("s3", map[string]string{"accessKey": accessKey, "secretKey": secretKey, "region": region, "bucket": bucket, "encrypt": encrypt}, skipCheck) } diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index dae5cc08..ff93b038 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -17,13 +17,14 @@ import ( // Hook up gocheck into the "go test" runner func Test(t *testing.T) { TestingT(t) } -func RegisterInProcessSuite(driverConstructor DriverConstructor) { +func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { Suite(&DriverSuite{ Constructor: driverConstructor, + SkipCheck: skipCheck, }) } -func RegisterIPCSuite(driverName string, ipcParams map[string]string) { +func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) { suite := &DriverSuite{ Constructor: func() (storagedriver.StorageDriver, error) { d, err := ipc.NewDriverClient(driverName, ipcParams) @@ -36,20 +37,30 @@ func RegisterIPCSuite(driverName string, ipcParams map[string]string) { } return d, nil }, + SkipCheck: skipCheck, } suite.Teardown = func() error { + if suite.StorageDriver == nil { + return nil + } + driverClient := suite.StorageDriver.(*ipc.StorageDriverClient) return driverClient.Stop() } Suite(suite) } +type SkipCheck func() (reason string) + +var NeverSkip = func() string { return "" } + type DriverConstructor func() (storagedriver.StorageDriver, error) type DriverTeardown func() error type DriverSuite struct { Constructor DriverConstructor Teardown DriverTeardown + SkipCheck storagedriver.StorageDriver } @@ -59,6 +70,9 @@ type TestDriverConfig struct { } func (suite *DriverSuite) SetUpSuite(c *C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } d, err := suite.Constructor() c.Assert(err, IsNil) suite.StorageDriver = d @@ -129,7 +143,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) - chunkSize := uint64(5 * 1024 * 1024) + chunkSize := uint64(10 * 1024 * 1024) contentsChunk1 := []byte(randomString(chunkSize)) contentsChunk2 := []byte(randomString(chunkSize)) From ca0084fad1a033a1b8da5bfbbcbf509701aa253d Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 28 Oct 2014 18:15:40 -0700 Subject: [PATCH 008/165] Adds 
StorageDriverFactory, unifying creation of StorageDrivers

Custom storage drivers can register a factory to create the driver by
name, similar to the database/sql package's Register and Open.

factory.Create returns an in-process driver if one is registered, or an
IPC driver if one can be found, and errors otherwise.

This standardizes parameter passing for the creation of storage drivers.

Also adds documentation for the storagedriver package and its children.
---
 main/storagedriver/filesystem/filesystem.go | 13 ++--
 main/storagedriver/inmemory/inmemory.go     |  4 +-
 main/storagedriver/s3/s3.go                 | 39 ++----------
 storagedriver/factory/factory.go            | 64 +++++++++++++++++++
 storagedriver/filesystem/filesystem.go      | 36 ++++++++++-
 storagedriver/filesystem/filesystem_test.go |  4 +-
 storagedriver/inmemory/inmemory.go          | 19 +++++-
 storagedriver/inmemory/inmemory_test.go     |  4 +-
 storagedriver/ipc/client.go                 | 13 ++++
 storagedriver/ipc/ipc.go                    | 15 +++++
 storagedriver/ipc/server.go                 | 37 ++++++-----
 storagedriver/s3/s3.go                      | 68 ++++++++++++++++++++-
 storagedriver/s3/s3_test.go                 | 22 +++++--
 storagedriver/storagedriver.go              | 25 ++++++++
 storagedriver/testsuites/testsuites.go      |  6 ++
 15 files changed, 290 insertions(+), 79 deletions(-)
 create mode 100644 storagedriver/factory/factory.go

diff --git a/main/storagedriver/filesystem/filesystem.go b/main/storagedriver/filesystem/filesystem.go
index 8c0e2677..8a5fc93c 100644
--- a/main/storagedriver/filesystem/filesystem.go
+++ b/main/storagedriver/filesystem/filesystem.go
@@ -8,19 +8,14 @@ import (
 	"github.com/docker/docker-registry/storagedriver/ipc"
 )
 
+// An out-of-process filesystem driver, intended to be run by ipc.NewDriverClient
 func main() {
 	parametersBytes := []byte(os.Args[1])
-	var parameters map[string]interface{}
+	var parameters map[string]string
 	err := json.Unmarshal(parametersBytes, &parameters)
 	if err != nil {
 		panic(err)
 	}
-	rootDirectory := "/tmp/registry"
-	if parameters != nil {
-		rootDirParam, ok := parameters["RootDirectory"].(string)
-		if ok && rootDirParam != "" {
-			rootDirectory = rootDirParam
-		}
-	}
-	ipc.Server(filesystem.NewDriver(rootDirectory))
+
+	ipc.StorageDriverServer(filesystem.FromParameters(parameters))
 }
diff --git a/main/storagedriver/inmemory/inmemory.go b/main/storagedriver/inmemory/inmemory.go
index f55c8d5f..999c05d7 100644
--- a/main/storagedriver/inmemory/inmemory.go
+++ b/main/storagedriver/inmemory/inmemory.go
@@ -5,6 +5,8 @@ import (
 	"github.com/docker/docker-registry/storagedriver/ipc"
 )
 
+// An out-of-process inmemory driver, intended to be run by ipc.NewDriverClient
+// This exists primarily for example and testing purposes
 func main() {
-	ipc.Server(inmemory.NewDriver())
+	ipc.StorageDriverServer(inmemory.New())
 }
diff --git a/main/storagedriver/s3/s3.go b/main/storagedriver/s3/s3.go
index 0fbc376c..aa5a1180 100644
--- a/main/storagedriver/s3/s3.go
+++ b/main/storagedriver/s3/s3.go
@@ -3,55 +3,24 @@ package main
 
 import (
 	"encoding/json"
 	"os"
-	"strconv"
 
-	"github.com/crowdmob/goamz/aws"
 	"github.com/docker/docker-registry/storagedriver/ipc"
 	"github.com/docker/docker-registry/storagedriver/s3"
 )
 
+// An out-of-process S3 driver, intended to be run by ipc.NewDriverClient
 func main() {
 	parametersBytes := []byte(os.Args[1])
-	var parameters map[string]interface{}
+	var parameters map[string]string
 	err := json.Unmarshal(parametersBytes, &parameters)
 	if err != nil {
 		panic(err)
 	}
 
-	accessKey, ok := parameters["accessKey"].(string)
-	if !ok || accessKey == "" {
-		panic("No accessKey parameter")
-	}
-
-	secretKey, ok := parameters["secretKey"].(string)
-	if !ok ||
secretKey == "" { - panic("No secretKey parameter") - } - - region, ok := parameters["region"].(string) - if !ok || region == "" { - panic("No region parameter") - } - - bucket, ok := parameters["bucket"].(string) - if !ok || bucket == "" { - panic("No bucket parameter") - } - - encrypt, ok := parameters["encrypt"].(string) - if !ok { - panic("No encrypt parameter") - } - - encryptBool, err := strconv.ParseBool(encrypt) + driver, err := s3.FromParameters(parameters) if err != nil { panic(err) } - driver, err := s3.NewDriver(accessKey, secretKey, aws.GetRegion(region), encryptBool, bucket) - if err != nil { - panic(err) - } - - ipc.Server(driver) + ipc.StorageDriverServer(driver) } diff --git a/storagedriver/factory/factory.go b/storagedriver/factory/factory.go new file mode 100644 index 00000000..c13c6c1e --- /dev/null +++ b/storagedriver/factory/factory.go @@ -0,0 +1,64 @@ +package factory + +import ( + "fmt" + + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/ipc" +) + +// Internal mapping between storage driver names and their respective factories +var driverFactories = make(map[string]StorageDriverFactory) + +// Factory interface for the storagedriver.StorageDriver interface +// Storage drivers should call Register() with a factory to make the driver available by name +type StorageDriverFactory interface { + // Creates and returns a new storagedriver.StorageDriver with the given parameters + // Parameters will vary by driver and may be ignored + // Each parameter key must only consist of lowercase letters and numbers + Create(parameters map[string]string) (storagedriver.StorageDriver, error) +} + +// Register makes a storage driver available by the provided name. +// If Register is called twice with the same name or if driver factory is nil, it panics. 
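
Register and Create below mirror database/sql's Register/Open split: a driver package registers itself in an init function, and callers construct drivers by name. A minimal usage sketch, assuming the inmemory driver registration added later in this patch:

    package main

    import (
        "github.com/docker/docker-registry/storagedriver/factory"
        // Importing for side effects runs the package's init, which
        // registers the "inmemory" driver with the factory.
        _ "github.com/docker/docker-registry/storagedriver/inmemory"
    )

    func main() {
        // Create consults registered factories first, then falls back to IPC.
        driver, err := factory.Create("inmemory", nil)
        if err != nil {
            panic(err)
        }
        if err := driver.PutContent("/greeting", []byte("hello")); err != nil {
            panic(err)
        }
    }
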
+func Register(name string, factory StorageDriverFactory) { + if factory == nil { + panic("Must not provide nil StorageDriverFactory") + } + _, registered := driverFactories[name] + if registered { + panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) + } + + driverFactories[name] = factory +} + +// Create a new storagedriver.StorageDriver with the given name and parameters +// To run in-process, the StorageDriverFactory must first be registered with the given name +// If no in-process drivers are found with the given name, this attempts to create an IPC driver +// If no in-process or external drivers are found, an InvalidStorageDriverError is returned +func Create(name string, parameters map[string]string) (storagedriver.StorageDriver, error) { + driverFactory, ok := driverFactories[name] + if !ok { + // No registered StorageDriverFactory found, try ipc + driverClient, err := ipc.NewDriverClient(name, parameters) + if err != nil { + return nil, InvalidStorageDriverError{name} + } + err = driverClient.Start() + if err != nil { + return nil, err + } + return driverClient, nil + } + return driverFactory.Create(parameters) +} + +// Error returned when attempting to construct an unregistered storage driver +type InvalidStorageDriverError struct { + Name string +} + +func (err InvalidStorageDriverError) Error() string { + return fmt.Sprintf("StorageDriver not registered: %s", err.Name) +} diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/filesystem.go index 79106e37..27ffcf7a 100644 --- a/storagedriver/filesystem/filesystem.go +++ b/storagedriver/filesystem/filesystem.go @@ -8,13 +8,45 @@ import ( "strings" "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/factory" ) +const DriverName = "filesystem" +const DefaultRootDirectory = "/tmp/registry/storage" + +func init() { + factory.Register(DriverName, &filesystemDriverFactory{}) +} + +// Implements the factory.StorageDriverFactory interface +type filesystemDriverFactory struct{} + +func (factory *filesystemDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) { + return FromParameters(parameters), nil +} + +// Storage Driver backed by a local filesystem +// All provided paths will be subpaths of the RootDirectory type FilesystemDriver struct { rootDirectory string } -func NewDriver(rootDirectory string) *FilesystemDriver { +// Constructs a new FilesystemDriver with a given parameters map +// Optional Parameters: +// - rootdirectory +func FromParameters(parameters map[string]string) *FilesystemDriver { + var rootDirectory = DefaultRootDirectory + if parameters != nil { + rootDir, ok := parameters["rootdirectory"] + if ok { + rootDirectory = rootDir + } + } + return New(rootDirectory) +} + +// Constructs a new FilesystemDriver with a given rootDirectory +func New(rootDirectory string) *FilesystemDriver { return &FilesystemDriver{rootDirectory} } @@ -22,6 +54,8 @@ func (d *FilesystemDriver) subPath(subPath string) string { return path.Join(d.rootDirectory, subPath) } +// Implement the storagedriver.StorageDriver interface + func (d *FilesystemDriver) GetContent(path string) ([]byte, error) { contents, err := ioutil.ReadFile(d.subPath(path)) if err != nil { diff --git a/storagedriver/filesystem/filesystem_test.go b/storagedriver/filesystem/filesystem_test.go index 15ef9663..7eb4024c 100644 --- a/storagedriver/filesystem/filesystem_test.go +++ b/storagedriver/filesystem/filesystem_test.go @@ -17,8 +17,8 @@ func 
init() { os.RemoveAll(rootDirectory) filesystemDriverConstructor := func() (storagedriver.StorageDriver, error) { - return NewDriver(rootDirectory), nil + return New(rootDirectory), nil } testsuites.RegisterInProcessSuite(filesystemDriverConstructor, testsuites.NeverSkip) - testsuites.RegisterIPCSuite("filesystem", map[string]string{"RootDirectory": rootDirectory}, testsuites.NeverSkip) + testsuites.RegisterIPCSuite(DriverName, map[string]string{"rootdirectory": rootDirectory}, testsuites.NeverSkip) } diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/inmemory.go index ea44bb39..2cf1b9f4 100644 --- a/storagedriver/inmemory/inmemory.go +++ b/storagedriver/inmemory/inmemory.go @@ -10,14 +10,31 @@ import ( "sync" "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/factory" ) +const DriverName = "inmemory" + +func init() { + factory.Register(DriverName, &inMemoryDriverFactory{}) +} + +// Implements the factory.StorageDriverFactory interface +type inMemoryDriverFactory struct{} + +func (factory *inMemoryDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) { + return New(), nil +} + +// InMemory Storage Driver backed by a map +// Intended solely for example and testing purposes type InMemoryDriver struct { storage map[string][]byte mutex sync.RWMutex } -func NewDriver() *InMemoryDriver { +// Constructs a new InMemoryDriver +func New() *InMemoryDriver { return &InMemoryDriver{storage: make(map[string][]byte)} } diff --git a/storagedriver/inmemory/inmemory_test.go b/storagedriver/inmemory/inmemory_test.go index accbb5f8..feea5eab 100644 --- a/storagedriver/inmemory/inmemory_test.go +++ b/storagedriver/inmemory/inmemory_test.go @@ -13,8 +13,8 @@ func Test(t *testing.T) { TestingT(t) } func init() { inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { - return NewDriver(), nil + return New(), nil } testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip) - testsuites.RegisterIPCSuite("inmemory", nil, testsuites.NeverSkip) + testsuites.RegisterIPCSuite(DriverName, nil, testsuites.NeverSkip) } diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index 0025d2bc..6327b156 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -15,6 +15,7 @@ import ( "github.com/docker/libchan/spdy" ) +// Storage Driver implementation using a managed child process communicating over IPC type StorageDriverClient struct { subprocess *exec.Cmd socket *os.File @@ -22,6 +23,13 @@ type StorageDriverClient struct { sender libchan.Sender } +// Constructs a new out-of-process storage driver using the driver name and configuration parameters +// Must call Start() on this driver client before remote method calls can be made +// +// Looks for drivers in the following locations in order: +// - Storage drivers directory (to be determined, yet not implemented) +// - $GOPATH/bin +// - $PATH func NewDriverClient(name string, parameters map[string]string) (*StorageDriverClient, error) { paramsBytes, err := json.Marshal(parameters) if err != nil { @@ -46,6 +54,7 @@ func NewDriverClient(name string, parameters map[string]string) (*StorageDriverC }, nil } +// Starts the designated child process storage driver and binds a socket to this process for IPC func (driver *StorageDriverClient) Start() error { fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) if err != nil { @@ -93,6 +102,8 @@ func (driver 
*StorageDriverClient) Start() error { return nil } +// Stops the child process storage driver +// storagedriver.StorageDriver methods called after Stop() will fail func (driver *StorageDriverClient) Stop() error { closeSenderErr := driver.sender.Close() closeTransportErr := driver.transport.Close() @@ -109,6 +120,8 @@ func (driver *StorageDriverClient) Stop() error { return killErr } +// Implement the storagedriver.StorageDriver interface over IPC + func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { receiver, remoteSender := libchan.Pipe() diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go index 89b0cf20..4e7e65c7 100644 --- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -10,12 +10,16 @@ import ( "github.com/docker/libchan" ) +// Defines a remote method call request +// A return value struct is to be sent over the ResponseChannel type Request struct { Type string Parameters map[string]interface{} ResponseChannel libchan.Sender } +// A simple wrapper around an io.ReadCloser that implements the io.ReadWriteCloser interface +// Writes are disallowed and will return an error if ever called type noWriteReadWriteCloser struct { io.ReadCloser } @@ -24,6 +28,8 @@ func (r noWriteReadWriteCloser) Write(p []byte) (n int, err error) { return 0, errors.New("Write unsupported") } +// Wraps an io.Reader as an io.ReadWriteCloser with a nop Close and unsupported Write method +// Has no effect when an io.ReadWriteCloser is passed in func WrapReader(reader io.Reader) io.ReadWriteCloser { if readWriteCloser, ok := reader.(io.ReadWriteCloser); ok { return readWriteCloser @@ -39,6 +45,7 @@ type responseError struct { Message string } +// Wraps an error in a serializable struct containing the error's type and message func ResponseError(err error) *responseError { if err == nil { return nil @@ -53,29 +60,37 @@ func (err *responseError) Error() string { return fmt.Sprintf("%s: %s", err.Type, err.Message) } +// IPC method call response object definitions + +// Response for a ReadStream request type ReadStreamResponse struct { Reader io.ReadWriteCloser Error *responseError } +// Response for a WriteStream request type WriteStreamResponse struct { Error *responseError } +// Response for a ResumeWritePosition request type ResumeWritePositionResponse struct { Position uint64 Error *responseError } +// Response for a List request type ListResponse struct { Keys []string Error *responseError } +// Response for a Move request type MoveResponse struct { Error *responseError } +// Response for a Delete request type DeleteResponse struct { Error *responseError } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 0d39a31b..0af41d0a 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -6,13 +6,18 @@ import ( "io/ioutil" "net" "os" + "reflect" "github.com/docker/docker-registry/storagedriver" "github.com/docker/libchan" "github.com/docker/libchan/spdy" ) -func Server(driver storagedriver.StorageDriver) error { +// Construct a new IPC server handling requests for the given storagedriver.StorageDriver +// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in client.go +// +// To create a new out-of-process driver, create a main package which calls StorageDriverServer with a storagedriver.StorageDriver +func StorageDriverServer(driver storagedriver.StorageDriver) error { childSocket := os.NewFile(3, "childSocket") defer childSocket.Close() conn, err := net.FileConn(childSocket) @@ -34,6 
+39,9 @@ func Server(driver storagedriver.StorageDriver) error {
 	}
 }
 
+// Receives new storagedriver.StorageDriver method requests and creates a new goroutine to handle each request
+//
+// Requests are expected to be of type ipc.Request as the parameters are unknown until the request type is deserialized
 func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) {
 	for {
 		var request Request
@@ -45,6 +53,8 @@ func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) {
 	}
 }
 
+// Handles storagedriver.StorageDriver method requests as defined in client.go
+// Responds to requests using the Request.ResponseChannel
 func handleRequest(driver storagedriver.StorageDriver, request Request) {
 	switch request.Type {
 	case "GetContent":
@@ -76,14 +86,9 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 			panic(err)
 		}
 	case "ReadStream":
-		var offset uint64
-
 		path, _ := request.Parameters["Path"].(string)
-		offset, ok := request.Parameters["Offset"].(uint64)
-		if !ok {
-			offsetSigned, _ := request.Parameters["Offset"].(int64)
-			offset = uint64(offsetSigned)
-		}
+		// Depending on serialization method, Offset may be converted to any int/uint type
+		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(uint64(0))).Uint()
 		reader, err := driver.ReadStream(path, offset)
 		var response ReadStreamResponse
 		if err != nil {
@@ -96,19 +101,11 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 			panic(err)
 		}
 	case "WriteStream":
-		var offset uint64
-
 		path, _ := request.Parameters["Path"].(string)
-		offset, ok := request.Parameters["Offset"].(uint64)
-		if !ok {
-			offsetSigned, _ := request.Parameters["Offset"].(int64)
-			offset = uint64(offsetSigned)
-		}
-		size, ok := request.Parameters["Size"].(uint64)
-		if !ok {
-			sizeSigned, _ := request.Parameters["Size"].(int64)
-			size = uint64(sizeSigned)
-		}
+		// Depending on serialization method, Offset may be converted to any int/uint type
+		offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(uint64(0))).Uint()
+		// Depending on serialization method, Size may be converted to any int/uint type
+		size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(uint64(0))).Uint()
 		reader, _ := request.Parameters["Reader"].(io.ReadCloser)
 		err := driver.WriteStream(path, offset, size, reader)
 		response := WriteStreamResponse{
diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go
index a73e5e3d..0c301126 100644
--- a/storagedriver/s3/s3.go
+++ b/storagedriver/s3/s3.go
@@ -2,6 +2,7 @@ package s3
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"net/http"
 	"strconv"
@@ -9,21 +10,82 @@ import (
 	"github.com/crowdmob/goamz/aws"
 	"github.com/crowdmob/goamz/s3"
 	"github.com/docker/docker-registry/storagedriver"
+	"github.com/docker/docker-registry/storagedriver/factory"
 )
 
-/* Chunks need to be at least 5MB to store with a multipart upload on S3 */
+const DriverName = "s3"
+
+// Chunks need to be at least 5MB to store with a multipart upload on S3
 const minChunkSize = uint64(5 * 1024 * 1024)
 
-/* The largest amount of parts you can request from S3 */
+// The largest amount of parts you can request from S3
 const listPartsMax = 1000
 
+func init() {
+	factory.Register(DriverName, &s3DriverFactory{})
+}
+
+// Implements the factory.StorageDriverFactory interface
+type s3DriverFactory struct{}
+
+func (factory *s3DriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
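
FromParameters below validates the same flat string map that the factory passes through. A sketch of constructing the driver this way — the environment variable names follow this patch's tests, and the hard-coded "true" for encrypt is illustrative:

    package main

    import (
        "os"

        "github.com/docker/docker-registry/storagedriver/s3"
    )

    func main() {
        // Keys are the lowercase parameter names FromParameters checks for.
        driver, err := s3.FromParameters(map[string]string{
            "accesskey": os.Getenv("AWS_ACCESS_KEY"),
            "secretkey": os.Getenv("AWS_SECRET_KEY"),
            "region":    os.Getenv("AWS_REGION"),
            "bucket":    os.Getenv("S3_BUCKET"),
            "encrypt":   "true", // illustrative; any strconv.ParseBool value works
        })
        if err != nil {
            panic(err)
        }
        _ = driver
    }

+// Storage Driver backed by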
Amazon S3 +// Objects are stored at absolute keys in the provided bucket type S3Driver struct { S3 *s3.S3 Bucket *s3.Bucket Encrypt bool } -func NewDriver(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*S3Driver, error) { +// Constructs a new S3Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]string) (*S3Driver, error) { + accessKey, ok := parameters["accesskey"] + if !ok || accessKey == "" { + return nil, fmt.Errorf("No accesskey parameter provided") + } + + secretKey, ok := parameters["secretkey"] + if !ok || secretKey == "" { + return nil, fmt.Errorf("No secretkey parameter provided") + } + + regionName, ok := parameters["region"] + if !ok || regionName == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := aws.GetRegion(regionName) + if region.Name == "" { + return nil, fmt.Errorf("Invalid region provided: %s", region) + } + + bucket, ok := parameters["bucket"] + if !ok || bucket == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encrypt, ok := parameters["encrypt"] + if !ok { + return nil, fmt.Errorf("No encrypt parameter provided") + } + + encryptBool, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("Unable to parse the encrypt parameter: %v", err) + } + return New(accessKey, secretKey, region, encryptBool, bucket) +} + +// Constructs a new S3Driver with the given AWS credentials, region, encryption flag, and bucketName +func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*S3Driver, error) { auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey} s3obj := s3.New(auth, region) bucket := s3obj.Bucket(bucketName) diff --git a/storagedriver/s3/s3_test.go b/storagedriver/s3/s3_test.go index b6862ab9..576c3623 100644 --- a/storagedriver/s3/s3_test.go +++ b/storagedriver/s3/s3_test.go @@ -2,6 +2,7 @@ package s3 import ( "os" + "strconv" "testing" "github.com/crowdmob/goamz/aws" @@ -14,23 +15,34 @@ import ( func Test(t *testing.T) { TestingT(t) } func init() { - accessKey := os.Getenv("ACCESS_KEY") - secretKey := os.Getenv("SECRET_KEY") + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") region := os.Getenv("AWS_REGION") bucket := os.Getenv("S3_BUCKET") encrypt := os.Getenv("S3_ENCRYPT") s3DriverConstructor := func() (storagedriver.StorageDriver, error) { - return NewDriver(accessKey, secretKey, aws.GetRegion(region), true, bucket) + shouldEncrypt, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + return New(accessKey, secretKey, aws.GetRegion(region), shouldEncrypt, bucket) } + // Skip S3 storage driver tests if environment variable parameters are not provided skipCheck := func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set ACCESS_KEY, SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" } return "" } testsuites.RegisterInProcessSuite(s3DriverConstructor, skipCheck) - testsuites.RegisterIPCSuite("s3", map[string]string{"accessKey": accessKey, "secretKey": secretKey, "region": region, "bucket": bucket, "encrypt": encrypt}, skipCheck) + testsuites.RegisterIPCSuite(DriverName, map[string]string{ + "accesskey": accessKey, + "secretkey": secretKey, + 
"region": region, + "bucket": bucket, + "encrypt": encrypt, + }, skipCheck) } diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index bfbfc110..55596cd6 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -5,17 +5,41 @@ import ( "io" ) +// Defines methods that a Storage Driver must implement for a filesystem-like key/value object storage type StorageDriver interface { + // Retrieve the content stored at "path" as a []byte + // Should primarily be used for small objects GetContent(path string) ([]byte, error) + + // Store the []byte content at a location designated by "path" + // Should primarily be used for small objects PutContent(path string, content []byte) error + + // Retrieve an io.ReadCloser for the content stored at "path" with a given byte offset + // May be used to resume reading a stream by providing a nonzero offset ReadStream(path string, offset uint64) (io.ReadCloser, error) + + // Store the contents of the provided io.ReadCloser at a location designated by "path" + // The driver will know it has received the full contents when it has read "size" bytes + // May be used to resume writing a stream by providing a nonzero offset + // The offset must be no larger than the number of bytes already written to this path WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error + + // Retrieve the byte offset at which it is safe to continue writing at "path" ResumeWritePosition(path string) (uint64, error) + + // Recursively lists the objects stored at a subpath of the given prefix List(prefix string) ([]string, error) + + // Moves an object stored at sourcePath to destPath, removing the original object + // Note: This may be no more efficient than a copy followed by a delete for many implementations Move(sourcePath string, destPath string) error + + // Recursively deletes all objects stored at "path" and its subpaths Delete(path string) error } +// Error returned when operating on a nonexistent path type PathNotFoundError struct { Path string } @@ -24,6 +48,7 @@ func (err PathNotFoundError) Error() string { return fmt.Sprintf("Path not found: %s", err.Path) } +// Error returned when attempting to read or write from an invalid offset type InvalidOffsetError struct { Path string Offset uint64 diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index ff93b038..45c621d3 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -17,6 +17,9 @@ import ( // Hook up gocheck into the "go test" runner func Test(t *testing.T) { TestingT(t) } +// Registers an in-process storage driver test suite with the go test runner +// +// If skipCheck returns a non-empty skip reason, the suite is skipped with the given reason func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { Suite(&DriverSuite{ Constructor: driverConstructor, @@ -24,6 +27,9 @@ func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipC }) } +// Registers a storage driver test suite which runs the named driver as a child process with the given parameters +// +// If skipCheck returns a non-empty skip reason, the suite is skipped with the given reason func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) { suite := &DriverSuite{ Constructor: func() (storagedriver.StorageDriver, error) { From 0e5d41ff9b7dd8a0eadbddd72beb7329c49b55fa Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 29 Oct 
2014 12:14:19 -0700 Subject: [PATCH 009/165] Updates documentation to follow godoc conventions --- storagedriver/factory/factory.go | 9 +++++---- storagedriver/filesystem/filesystem.go | 9 +++++---- storagedriver/inmemory/inmemory.go | 8 +++++--- storagedriver/ipc/client.go | 15 +++++++++------ storagedriver/ipc/ipc.go | 23 ++++++++++++----------- storagedriver/ipc/server.go | 18 +++++++++++------- storagedriver/s3/s3.go | 16 ++++++++++------ storagedriver/storagedriver.go | 26 +++++++++++++++----------- storagedriver/testsuites/testsuites.go | 24 ++++++++++++------------ 9 files changed, 84 insertions(+), 64 deletions(-) diff --git a/storagedriver/factory/factory.go b/storagedriver/factory/factory.go index c13c6c1e..0b85f372 100644 --- a/storagedriver/factory/factory.go +++ b/storagedriver/factory/factory.go @@ -7,13 +7,14 @@ import ( "github.com/docker/docker-registry/storagedriver/ipc" ) -// Internal mapping between storage driver names and their respective factories +// driverFactories stores an internal mapping between storage driver names and their respective +// factories var driverFactories = make(map[string]StorageDriverFactory) -// Factory interface for the storagedriver.StorageDriver interface +// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces // Storage drivers should call Register() with a factory to make the driver available by name type StorageDriverFactory interface { - // Creates and returns a new storagedriver.StorageDriver with the given parameters + // Create returns a new storagedriver.StorageDriver with the given parameters // Parameters will vary by driver and may be ignored // Each parameter key must only consist of lowercase letters and numbers Create(parameters map[string]string) (storagedriver.StorageDriver, error) @@ -54,7 +55,7 @@ func Create(name string, parameters map[string]string) (storagedriver.StorageDri return driverFactory.Create(parameters) } -// Error returned when attempting to construct an unregistered storage driver +// InvalidStorageDriverError records an attempt to construct an unregistered storage driver type InvalidStorageDriverError struct { Name string } diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/filesystem.go index 27ffcf7a..4f100dd3 100644 --- a/storagedriver/filesystem/filesystem.go +++ b/storagedriver/filesystem/filesystem.go @@ -18,20 +18,20 @@ func init() { factory.Register(DriverName, &filesystemDriverFactory{}) } -// Implements the factory.StorageDriverFactory interface +// filesystemDriverFactory implements the factory.StorageDriverFactory interface type filesystemDriverFactory struct{} func (factory *filesystemDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) { return FromParameters(parameters), nil } -// Storage Driver backed by a local filesystem +// FilesystemDriver is a storagedriver.StorageDriver implementation backed by a local filesystem // All provided paths will be subpaths of the RootDirectory type FilesystemDriver struct { rootDirectory string } -// Constructs a new FilesystemDriver with a given parameters map +// FromParameters constructs a new FilesystemDriver with a given parameters map // Optional Parameters: // - rootdirectory func FromParameters(parameters map[string]string) *FilesystemDriver { @@ -45,11 +45,12 @@ func FromParameters(parameters map[string]string) *FilesystemDriver { return New(rootDirectory) } -// Constructs a new FilesystemDriver with a given rootDirectory +// New constructs a 
new FilesystemDriver with a given rootDirectory
 func New(rootDirectory string) *FilesystemDriver {
 	return &FilesystemDriver{rootDirectory}
 }
 
+// subPath returns the absolute path of a key within the FilesystemDriver's storage
 func (d *FilesystemDriver) subPath(subPath string) string {
 	return path.Join(d.rootDirectory, subPath)
 }
diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/inmemory.go
index 2cf1b9f4..d7d4ccea 100644
--- a/storagedriver/inmemory/inmemory.go
+++ b/storagedriver/inmemory/inmemory.go
@@ -19,25 +19,27 @@ func init() {
 	factory.Register(DriverName, &inMemoryDriverFactory{})
 }
 
-// Implements the factory.StorageDriverFactory interface
+// inMemoryDriverFactory implements the factory.StorageDriverFactory interface
 type inMemoryDriverFactory struct{}
 
 func (factory *inMemoryDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
 	return New(), nil
 }
 
-// InMemory Storage Driver backed by a map
+// InMemoryDriver is a storagedriver.StorageDriver implementation backed by a local map
 // Intended solely for example and testing purposes
 type InMemoryDriver struct {
 	storage map[string][]byte
 	mutex   sync.RWMutex
 }
 
-// Constructs a new InMemoryDriver
+// New constructs a new InMemoryDriver
 func New() *InMemoryDriver {
 	return &InMemoryDriver{storage: make(map[string][]byte)}
 }
 
+// Implement the storagedriver.StorageDriver interface
+
 func (d *InMemoryDriver) GetContent(path string) ([]byte, error) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go
index 6327b156..cdac8b11 100644
--- a/storagedriver/ipc/client.go
+++ b/storagedriver/ipc/client.go
@@ -15,7 +15,8 @@ import (
 	"github.com/docker/libchan/spdy"
 )
 
-// Storage Driver implementation using a managed child process communicating over IPC
+// StorageDriverClient is a storagedriver.StorageDriver implementation using a managed child process
+// communicating over IPC using libchan with a unix domain socket
 type StorageDriverClient struct {
 	subprocess *exec.Cmd
 	socket     *os.File
@@ -23,8 +24,9 @@ type StorageDriverClient struct {
 	sender     libchan.Sender
 }
 
-// Constructs a new out-of-process storage driver using the driver name and configuration parameters
-// Must call Start() on this driver client before remote method calls can be made
+// NewDriverClient constructs a new out-of-process storage driver using the driver name and
+// configuration parameters
+// A user must call Start on this driver client before remote method calls can be made
 //
 // Looks for drivers in the following locations in order:
 //  - Storage drivers directory (to be determined, yet not implemented)
@@ -54,7 +56,8 @@ func NewDriverClient(name string, parameters map[string]string) (*StorageDriverC
 	}, nil
 }
 
-// Starts the designated child process storage driver and binds a socket to this process for IPC
+// Start starts the designated child process storage driver and binds a socket to this process for
+// IPC method calls
 func (driver *StorageDriverClient) Start() error {
 	fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
 	if err != nil {
@@ -102,8 +105,8 @@ func (driver *StorageDriverClient) Start() error {
 	return nil
 }
 
-// Stops the child process storage driver
-// storagedriver.StorageDriver methods called after Stop() will fail
+// Stop stops the child process storage driver
+// storagedriver.StorageDriver methods called after Stop will fail
 func (driver *StorageDriverClient) Stop() error {
 	closeSenderErr :=
driver.sender.Close() closeTransportErr := driver.transport.Close() diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go index 4e7e65c7..30f63393 100644 --- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -10,7 +10,7 @@ import ( "github.com/docker/libchan" ) -// Defines a remote method call request +// Request defines a remote method call request // A return value struct is to be sent over the ResponseChannel type Request struct { Type string @@ -18,8 +18,9 @@ type Request struct { ResponseChannel libchan.Sender } -// A simple wrapper around an io.ReadCloser that implements the io.ReadWriteCloser interface -// Writes are disallowed and will return an error if ever called +// noWriteReadWriteCloser is a simple wrapper around an io.ReadCloser that implements the +// io.ReadWriteCloser interface +// Calls to Write are disallowed and will return an error type noWriteReadWriteCloser struct { io.ReadCloser } @@ -28,7 +29,7 @@ func (r noWriteReadWriteCloser) Write(p []byte) (n int, err error) { return 0, errors.New("Write unsupported") } -// Wraps an io.Reader as an io.ReadWriteCloser with a nop Close and unsupported Write method +// WrapReader wraps an io.Reader as an io.ReadWriteCloser with a nop Close and unsupported Write // Has no effect when an io.ReadWriteCloser is passed in func WrapReader(reader io.Reader) io.ReadWriteCloser { if readWriteCloser, ok := reader.(io.ReadWriteCloser); ok { @@ -45,7 +46,7 @@ type responseError struct { Message string } -// Wraps an error in a serializable struct containing the error's type and message +// ResponseError wraps an error in a serializable struct containing the error's type and message func ResponseError(err error) *responseError { if err == nil { return nil @@ -62,35 +63,35 @@ func (err *responseError) Error() string { // IPC method call response object definitions -// Response for a ReadStream request +// ReadStreamResponse is a response for a ReadStream request type ReadStreamResponse struct { Reader io.ReadWriteCloser Error *responseError } -// Response for a WriteStream request +// WriteStreamResponse is a response for a WriteStream request type WriteStreamResponse struct { Error *responseError } -// Response for a ResumeWritePosition request +// ResumeWritePositionResponse is a response for a ResumeWritePosition request type ResumeWritePositionResponse struct { Position uint64 Error *responseError } -// Response for a List request +// ListResponse is a response for a List request type ListResponse struct { Keys []string Error *responseError } -// Response for a Move request +// MoveResponse is a response for a Move request type MoveResponse struct { Error *responseError } -// Response for a Delete request +// DeleteResponse is a response for a Delete request type DeleteResponse struct { Error *responseError } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 0af41d0a..81432cc3 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -13,10 +13,13 @@ import ( "github.com/docker/libchan/spdy" ) -// Construct a new IPC server handling requests for the given storagedriver.StorageDriver -// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in client.go +// StorageDriverServer runs a new IPC server handling requests for the given +// storagedriver.StorageDriver +// This explicitly uses file descriptor 3 for IPC communication, as storage drivers are spawned in +// client.go // -// To create a new out-of-process driver, create a main 
package which calls StorageDriverServer with a storagedriver.StorageDriver +// To create a new out-of-process driver, create a main package which calls StorageDriverServer with +// a storagedriver.StorageDriver func StorageDriverServer(driver storagedriver.StorageDriver) error { childSocket := os.NewFile(3, "childSocket") defer childSocket.Close() @@ -39,9 +42,10 @@ func StorageDriverServer(driver storagedriver.StorageDriver) error { } } -// Receives new storagedriver.StorageDriver method requests and creates a new goroutine to handle each request -// -// Requests are expected to be of type ipc.Request as the parameters are unknown until the request type is deserialized +// receive receives new storagedriver.StorageDriver method requests and creates a new goroutine to +// handle each request +// Requests are expected to be of type ipc.Request as the parameters are unknown until the request +// type is deserialized func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { for { var request Request @@ -53,7 +57,7 @@ func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { } } -// Handles storagedriver.StorageDriver method requests as defined in client.go +// handleRequest handles storagedriver.StorageDriver method requests as defined in client.go // Responds to requests using the Request.ResponseChannel func handleRequest(driver storagedriver.StorageDriver, request Request) { switch request.Type { diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go index 0c301126..5338a276 100644 --- a/storagedriver/s3/s3.go +++ b/storagedriver/s3/s3.go @@ -15,24 +15,25 @@ import ( const DriverName = "s3" -// Chunks need to be at least 5MB to store with a multipart upload on S3 +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB const minChunkSize = uint64(5 * 1024 * 1024) -// The largest amount of parts you can request from S3 +// listPartsMax is the largest amount of parts you can request from S3 const listPartsMax = 1000 func init() { factory.Register(DriverName, &s3DriverFactory{}) } -// Implements the factory.StorageDriverFactory interface +// s3DriverFactory implements the factory.StorageDriverFactory interface type s3DriverFactory struct{} func (factory *s3DriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } -// Storage Driver backed by Amazon S3 +// S3Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 // Objects are stored at absolute keys in the provided bucket type S3Driver struct { S3 *s3.S3 @@ -40,7 +41,7 @@ type S3Driver struct { Encrypt bool } -// Constructs a new S3Driver with a given parameters map +// FromParameters constructs a new S3Driver with a given parameters map // Required parameters: // - accesskey // - secretkey @@ -84,7 +85,8 @@ func FromParameters(parameters map[string]string) (*S3Driver, error) { return New(accessKey, secretKey, region, encryptBool, bucket) } -// Constructs a new S3Driver with the given AWS credentials, region, encryption flag, and bucketName +// New constructs a new S3Driver with the given AWS credentials, region, encryption flag, and +// bucketName func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*S3Driver, error) { auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey} s3obj := s3.New(auth, region) @@ -100,6 +102,8 @@ func New(accessKey string, secretKey string, region aws.Region, 
encrypt bool, bu return &S3Driver{s3obj, bucket, encrypt}, nil } +// Implement the storagedriver.StorageDriver interface + func (d *S3Driver) GetContent(path string) ([]byte, error) { return d.Bucket.Get(path) } diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index 55596cd6..d03fec0c 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -5,41 +5,45 @@ import ( "io" ) -// Defines methods that a Storage Driver must implement for a filesystem-like key/value object storage +// StorageDriver defines methods that a Storage Driver must implement for a filesystem-like +// key/value object storage type StorageDriver interface { - // Retrieve the content stored at "path" as a []byte + // GetContent retrieves the content stored at "path" as a []byte // Should primarily be used for small objects GetContent(path string) ([]byte, error) - // Store the []byte content at a location designated by "path" + // PutContent stores the []byte content at a location designated by "path" // Should primarily be used for small objects PutContent(path string, content []byte) error - // Retrieve an io.ReadCloser for the content stored at "path" with a given byte offset + // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a given byte + // offset // May be used to resume reading a stream by providing a nonzero offset ReadStream(path string, offset uint64) (io.ReadCloser, error) - // Store the contents of the provided io.ReadCloser at a location designated by "path" + // WriteStream stores the contents of the provided io.ReadCloser at a location designated by + // the given path // The driver will know it has received the full contents when it has read "size" bytes // May be used to resume writing a stream by providing a nonzero offset // The offset must be no larger than the number of bytes already written to this path WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error - // Retrieve the byte offset at which it is safe to continue writing at "path" + // ResumeWritePosition retrieves the byte offset at which it is safe to continue writing at the + // given path ResumeWritePosition(path string) (uint64, error) - // Recursively lists the objects stored at a subpath of the given prefix + // List recursively lists the objects stored at a subpath of the given prefix List(prefix string) ([]string, error) - // Moves an object stored at sourcePath to destPath, removing the original object + // Move moves an object stored at sourcePath to destPath, removing the original object // Note: This may be no more efficient than a copy followed by a delete for many implementations Move(sourcePath string, destPath string) error - // Recursively deletes all objects stored at "path" and its subpaths + // Delete recursively deletes all objects stored at "path" and its subpaths Delete(path string) error } -// Error returned when operating on a nonexistent path +// PathNotFoundError is returned when operating on a nonexistent path type PathNotFoundError struct { Path string } @@ -48,7 +52,7 @@ func (err PathNotFoundError) Error() string { return fmt.Sprintf("Path not found: %s", err.Path) } -// Error returned when attempting to read or write from an invalid offset +// InvalidOffsetError is returned when attempting to read or write from an invalid offset type InvalidOffsetError struct { Path string Offset uint64 diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 45c621d3..94d85461 100644 
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -17,9 +17,7 @@ import (
 // Hook up gocheck into the "go test" runner
 func Test(t *testing.T) { TestingT(t) }
 
-// Registers an in-process storage driver test suite with the go test runner
-//
-// If skipCheck returns a non-empty skip reason, the suite is skipped with the given reason
+// RegisterInProcessSuite registers an in-process storage driver test suite with the go test runner
 func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) {
 	Suite(&DriverSuite{
 		Constructor: driverConstructor,
@@ -27,9 +25,8 @@ func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipC
 	})
 }
 
-// Registers a storage driver test suite which runs the named driver as a child process with the given parameters
-//
-// If skipCheck returns a non-empty skip reason, the suite is skipped with the given reason
+// RegisterIPCSuite registers a storage driver test suite which runs the named driver as a child
+// process with the given parameters
 func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) {
 	suite := &DriverSuite{
 		Constructor: func() (storagedriver.StorageDriver, error) {
@@ -56,13 +53,21 @@ func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck
 	Suite(suite)
 }
 
+// SkipCheck is a function used to determine if a test suite should be skipped
+// If a SkipCheck returns a non-empty skip reason, the suite is skipped with the given reason
 type SkipCheck func() (reason string)
 
-var NeverSkip = func() string { return "" }
+// NeverSkip is a default SkipCheck which never skips the suite
+var NeverSkip SkipCheck = func() string { return "" }
 
+// DriverConstructor is a function which returns a new storagedriver.StorageDriver
 type DriverConstructor func() (storagedriver.StorageDriver, error)
+
+// DriverTeardown is a function which cleans up a suite's storagedriver.StorageDriver
 type DriverTeardown func() error
 
+// DriverSuite is a gocheck test suite designed to test a storagedriver.StorageDriver
+// The intended way to create a DriverSuite is with RegisterInProcessSuite or RegisterIPCSuite
 type DriverSuite struct {
 	Constructor DriverConstructor
 	Teardown    DriverTeardown
@@ -70,11 +75,6 @@ type DriverSuite struct {
 	storagedriver.StorageDriver
 }
 
-type TestDriverConfig struct {
-	name   string
-	params map[string]string
-}
-
 func (suite *DriverSuite) SetUpSuite(c *C) {
 	if reason := suite.SkipCheck(); reason != "" {
 		c.Skip(reason)

From 3e4738587f31b47edb79d361ed370cb2ae6d41fd Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Thu, 30 Oct 2014 11:42:59 -0700
Subject: [PATCH 010/165] Adds README for the storagedriver package

---
 storagedriver/README.md        | 47 ++++++++++++++++++++++++++++++++++
 storagedriver/storagedriver.go |  2 +-
 2 files changed, 48 insertions(+), 1 deletion(-)
 create mode 100644 storagedriver/README.md

diff --git a/storagedriver/README.md b/storagedriver/README.md
new file mode 100644
index 00000000..f2795834
--- /dev/null
+++ b/storagedriver/README.md
@@ -0,0 +1,47 @@
+Docker-Registry Storage Driver
+==============================
+
+This document describes the registry storage driver model and implementation, and explains how to contribute new storage drivers.
+
+Provided Drivers
+================
+
+This storage driver package comes bundled with three default drivers.
+
+1. filesystem: A local storage driver configured to use a directory tree in the local filesystem.
+2. s3: A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
+3. inmemory: A temporary storage driver using a local inmemory map. This exists solely for reference and testing.
+
+Storage Driver API
+==================
+
+The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems.
+
+Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key.
+
+Storage drivers are intended (but not required) to be written in go, providing compile-time validation of the `storagedriver.StorageDriver` interface, although an IPC driver wrapper means that it is not required for drivers to be included in the compiled registry. The `storagedriver/ipc` package provides a client/server protocol for running storage drivers provided in external executables as a managed child server process.
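+
+For a quick orientation, the following sketch exercises this interface against the bundled inmemory driver. The `inmemory.New()` constructor and the `github.com/docker/docker-registry` import path are assumptions made for illustration only:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/docker/docker-registry/storagedriver/inmemory"
+)
+
+func main() {
+	driver := inmemory.New() // assumed constructor for the inmemory driver
+
+	// Store a small object and read it back
+	if err := driver.PutContent("/greetings/hello", []byte("hello world")); err != nil {
+		panic(err)
+	}
+	content, err := driver.GetContent("/greetings/hello")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s\n", content)
+
+	// List the objects stored under a prefix
+	keys, err := driver.List("/greetings")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(keys)
+}
+```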
+
+Driver Selection and Configuration
+==================================
+
+The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based on the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package.
+
+Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the same name and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
+
+Driver Contribution
+===================
+
+## Writing new storage drivers
+To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system and as a distributable IPC server executable.
+
+### In-process drivers
+Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.Create` to construct instances of this driver without requiring modification of imports throughout the codebase.
+
+### Out-of-process drivers
+As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `main/storagedriver/filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
+
+## Testing
+Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively.
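+
+As a concrete illustration of the registration functions above, a driver's test file might look like the following sketch (the `New()` constructor and the repository import paths are assumptions):
+
+```go
+package inmemory
+
+import (
+	"testing"
+
+	"github.com/docker/docker-registry/storagedriver"
+	"github.com/docker/docker-registry/storagedriver/testsuites"
+	. "gopkg.in/check.v1"
+)
+
+// Hook up gocheck into the "go test" runner
+func Test(t *testing.T) { TestingT(t) }
+
+func init() {
+	// Register the in-process suite with a constructor for this driver
+	testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) {
+		return New(), nil // New() is assumed to construct this package's driver
+	}, testsuites.NeverSkip)
+}
+```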
+
+## Drivers written in other languages
+Although storage drivers are strongly recommended to be written in go for consistency, compile-time validation, and support, the IPC framework allows for a level of language-agnosticism. Non-go drivers must implement the storage driver protocol by mimicking StorageDriverServer in `storagedriver/ipc/server.go`. As the IPC framework is a layer on top of [docker/libchan](https://github.com/docker/libchan), this currently limits language support to Java via [ndeloof/jchan](https://github.com/ndeloof/jchan) and JavaScript via [GraftJS/jschan](https://github.com/GraftJS/jschan), although contributions to the libchan project are welcome.

diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go
index d03fec0c..57e34c0d 100644
--- a/storagedriver/storagedriver.go
+++ b/storagedriver/storagedriver.go
@@ -25,7 +25,7 @@ type StorageDriver interface {
 	// the given path
 	// The driver will know it has received the full contents when it has read "size" bytes
 	// May be used to resume writing a stream by providing a nonzero offset
-	// The offset must be no larger than the number of bytes already written to this path
+	// The offset must be no larger than the ResumeWritePosition for this path
 	WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error
 
 	// ResumeWritePosition retrieves the byte offset at which it is safe to continue writing at the

From b522fbd67507b0121a45ab9337cd5adfb2b501db Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Fri, 31 Oct 2014 11:50:02 -0700
Subject: [PATCH 011/165] Removes WrapReader boilerplate for updates to libchan

libchan now supports io.ReadCloser and io.WriteCloser, so we don't need io.ReadWriteCloser wrapping

---
 storagedriver/ipc/client.go |  4 ++--
 storagedriver/ipc/ipc.go    | 27 +--------------------------
 storagedriver/ipc/server.go |  4 ++--
 3 files changed, 5 insertions(+), 30 deletions(-)

diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go
index cdac8b11..54090945 100644
--- a/storagedriver/ipc/client.go
+++ b/storagedriver/ipc/client.go
@@ -155,7 +155,7 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) {
 func (driver *StorageDriverClient) PutContent(path string, contents []byte) error {
 	receiver, remoteSender := libchan.Pipe()
 
-	params := map[string]interface{}{"Path": path, "Reader": WrapReader(bytes.NewReader(contents))}
+	params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))}
 	err := driver.sender.Send(&Request{Type: "PutContent", Parameters: params, ResponseChannel: remoteSender})
 	if err != nil {
 		return err
@@ -199,7 +199,7 @@ func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.Re
 func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
 	receiver, remoteSender := libchan.Pipe()
 
-	params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": WrapReader(reader)}
+	params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": ioutil.NopCloser(reader)}
 	err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender})
 	if err != nil {
 		return err
diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go
index 30f63393..9c6b1dc0 100644
--- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -1,10 +1,8 @@ package ipc import ( - "errors" "fmt" "io" - "io/ioutil" "reflect" "github.com/docker/libchan" @@ -18,29 +16,6 @@ type Request struct { ResponseChannel libchan.Sender } -// noWriteReadWriteCloser is a simple wrapper around an io.ReadCloser that implements the -// io.ReadWriteCloser interface -// Calls to Write are disallowed and will return an error -type noWriteReadWriteCloser struct { - io.ReadCloser -} - -func (r noWriteReadWriteCloser) Write(p []byte) (n int, err error) { - return 0, errors.New("Write unsupported") -} - -// WrapReader wraps an io.Reader as an io.ReadWriteCloser with a nop Close and unsupported Write -// Has no effect when an io.ReadWriteCloser is passed in -func WrapReader(reader io.Reader) io.ReadWriteCloser { - if readWriteCloser, ok := reader.(io.ReadWriteCloser); ok { - return readWriteCloser - } else if readCloser, ok := reader.(io.ReadCloser); ok { - return noWriteReadWriteCloser{readCloser} - } else { - return noWriteReadWriteCloser{ioutil.NopCloser(reader)} - } -} - type responseError struct { Type string Message string @@ -65,7 +40,7 @@ func (err *responseError) Error() string { // ReadStreamResponse is a response for a ReadStream request type ReadStreamResponse struct { - Reader io.ReadWriteCloser + Reader io.ReadCloser Error *responseError } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 81432cc3..989b44ba 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -68,7 +68,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { if err != nil { response = ReadStreamResponse{Error: ResponseError(err)} } else { - response = ReadStreamResponse{Reader: WrapReader(bytes.NewReader(content))} + response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))} } err = request.ResponseChannel.Send(&response) if err != nil { @@ -98,7 +98,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { if err != nil { response = ReadStreamResponse{Error: ResponseError(err)} } else { - response = ReadStreamResponse{Reader: WrapReader(reader)} + response = ReadStreamResponse{Reader: ioutil.NopCloser(reader)} } err = request.ResponseChannel.Send(&response) if err != nil { From 43716a28508cec2888337ac70fcfb8c19c84788c Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 3 Nov 2014 16:20:38 -0800 Subject: [PATCH 012/165] Uses IsTruncated and NextMarker for S3 list internal pagination --- storagedriver/s3/s3.go | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go index 5338a276..ea13b87c 100644 --- a/storagedriver/s3/s3.go +++ b/storagedriver/s3/s3.go @@ -161,7 +161,6 @@ func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadC } else { part, err := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:bytesRead])) if err != nil { - return err } @@ -192,7 +191,10 @@ func (d *S3Driver) ResumeWritePosition(path string) (uint64, error) { } func (d *S3Driver) List(prefix string) ([]string, error) { - listResponse, err := d.Bucket.List(prefix+"/", "/", "", listPartsMax) + if prefix[len(prefix)-1] != '/' { + prefix = prefix + "/" + } + listResponse, err := d.Bucket.List(prefix, "/", "", listPartsMax) if err != nil { return nil, err } @@ -200,7 +202,7 @@ func (d *S3Driver) List(prefix string) ([]string, error) { files := []string{} directories := []string{} - for 
len(listResponse.Contents) > 0 || len(listResponse.CommonPrefixes) > 0 { + for { for _, key := range listResponse.Contents { files = append(files, key.Key) } @@ -209,27 +211,13 @@ func (d *S3Driver) List(prefix string) ([]string, error) { directories = append(directories, commonPrefix[0:len(commonPrefix)-1]) } - lastFile := "" - lastDirectory := "" - lastMarker := "" - - if len(files) > 0 { - lastFile = files[len(files)-1] - } - - if len(directories) > 0 { - lastDirectory = directories[len(directories)-1] + "/" - } - - if lastDirectory > lastFile { - lastMarker = lastDirectory + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(prefix, "/", listResponse.NextMarker, listPartsMax) + if err != nil { + return nil, err + } } else { - lastMarker = lastFile - } - - listResponse, err = d.Bucket.List(prefix+"/", "/", lastMarker, listPartsMax) - if err != nil { - return nil, err + break } } From 7daa850d44dd2154e05ff5d178c8f0f3667a6119 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 4 Nov 2014 09:52:24 -0800 Subject: [PATCH 013/165] Fixes documentation to show that StorageDriver.List is non-recursive --- storagedriver/filesystem/filesystem.go | 8 ++++---- storagedriver/inmemory/inmemory.go | 4 ++-- storagedriver/ipc/client.go | 4 ++-- storagedriver/ipc/server.go | 4 ++-- storagedriver/storagedriver.go | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/filesystem.go index 4f100dd3..0bdf6017 100644 --- a/storagedriver/filesystem/filesystem.go +++ b/storagedriver/filesystem/filesystem.go @@ -166,9 +166,9 @@ func (d *FilesystemDriver) ResumeWritePosition(subPath string) (uint64, error) { return uint64(fileInfo.Size()), nil } -func (d *FilesystemDriver) List(prefix string) ([]string, error) { - prefix = strings.TrimRight(prefix, "/") - fullPath := d.subPath(prefix) +func (d *FilesystemDriver) List(subPath string) ([]string, error) { + subPath = strings.TrimRight(subPath, "/") + fullPath := d.subPath(subPath) dir, err := os.Open(fullPath) if err != nil { @@ -182,7 +182,7 @@ func (d *FilesystemDriver) List(prefix string) ([]string, error) { keys := make([]string, 0, len(fileNames)) for _, fileName := range fileNames { - keys = append(keys, path.Join(prefix, fileName)) + keys = append(keys, path.Join(subPath, fileName)) } return keys, nil diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/inmemory.go index d7d4ccea..9b9fd947 100644 --- a/storagedriver/inmemory/inmemory.go +++ b/storagedriver/inmemory/inmemory.go @@ -110,8 +110,8 @@ func (d *InMemoryDriver) ResumeWritePosition(path string) (uint64, error) { return uint64(len(contents)), nil } -func (d *InMemoryDriver) List(prefix string) ([]string, error) { - subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s/[^/]+", prefix)) +func (d *InMemoryDriver) List(path string) ([]string, error) { + subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s/[^/]+", path)) if err != nil { return nil, err } diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index 54090945..fd5f15c3 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -240,10 +240,10 @@ func (driver *StorageDriverClient) ResumeWritePosition(path string) (uint64, err return response.Position, nil } -func (driver *StorageDriverClient) List(prefix string) ([]string, error) { +func (driver *StorageDriverClient) List(path string) ([]string, error) { receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Prefix": 
prefix} + params := map[string]interface{}{"Path": path} err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return nil, err diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 989b44ba..d73be2f6 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -131,8 +131,8 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { panic(err) } case "List": - prefix, _ := request.Parameters["Prefix"].(string) - keys, err := driver.List(prefix) + path, _ := request.Parameters["Path"].(string) + keys, err := driver.List(path) response := ListResponse{ Keys: keys, Error: ResponseError(err), diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index 57e34c0d..a66dba0c 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -32,8 +32,8 @@ type StorageDriver interface { // given path ResumeWritePosition(path string) (uint64, error) - // List recursively lists the objects stored at a subpath of the given prefix - List(prefix string) ([]string, error) + // List returns a list of the objects that are direct descendants of the given path + List(path string) ([]string, error) // Move moves an object stored at sourcePath to destPath, removing the original object // Note: This may be no more efficient than a copy followed by a delete for many implementations From 0ad4bba103bd672c4b553d92b5f22fadebe6267f Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 27 Oct 2014 16:16:19 -0700 Subject: [PATCH 014/165] Initial configuration parser --- configuration/configuration.go | 194 ++++++++++++++++++++++++++++ configuration/configuration_test.go | 171 ++++++++++++++++++++++++ 2 files changed, 365 insertions(+) create mode 100644 configuration/configuration.go create mode 100644 configuration/configuration_test.go diff --git a/configuration/configuration.go b/configuration/configuration.go new file mode 100644 index 00000000..04135bbc --- /dev/null +++ b/configuration/configuration.go @@ -0,0 +1,194 @@ +package configuration + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "gopkg.in/yaml.v2" +) + +var CurrentVersion = Version{Major: 0, Minor: 1} + +type Configuration struct { + Version Version `yaml:"version"` + Registry Registry `yaml:"registry"` +} + +type Version struct { + Major uint + Minor uint +} + +func (version Version) String() string { + return fmt.Sprintf("%d.%d", version.Major, version.Minor) +} + +func (version Version) MarshalYAML() (interface{}, error) { + return version.String(), nil +} + +type Registry struct { + LogLevel string + Storage Storage +} + +type Storage struct { + Type string + Parameters map[string]string +} + +func (storage Storage) MarshalYAML() (interface{}, error) { + return yaml.MapSlice{yaml.MapItem{storage.Type, storage.Parameters}}, nil +} + +type untypedConfiguration struct { + Version string `yaml:"version"` + Registry interface{} `yaml:"registry"` +} + +type v_0_1_RegistryConfiguration struct { + LogLevel string `yaml:"loglevel"` + Storage interface{} `yaml:"storage"` +} + +func Parse(in []byte) (*Configuration, error) { + var untypedConfig untypedConfiguration + var config Configuration + + err := yaml.Unmarshal(in, &untypedConfig) + if err != nil { + return nil, err + } + if untypedConfig.Version == "" { + return nil, fmt.Errorf("Please specify a configuration version. 
Current version is %s", CurrentVersion) + } + versionParts := strings.Split(untypedConfig.Version, ".") + if len(versionParts) != 2 { + return nil, fmt.Errorf("Invalid version: %s Expected format: X.Y", untypedConfig.Version) + } + majorVersion, err := strconv.ParseUint(versionParts[0], 10, 0) + if err != nil { + return nil, fmt.Errorf("Major version must be of type uint, received %v", versionParts[0]) + } + minorVersion, err := strconv.ParseUint(versionParts[1], 10, 0) + if err != nil { + return nil, fmt.Errorf("Minor version must be of type uint, received %v", versionParts[1]) + } + config.Version = Version{Major: uint(majorVersion), Minor: uint(minorVersion)} + + switch config.Version { + case Version{0, 1}: + registry, err := parseV_0_1_Registry(untypedConfig.Registry) + if err != nil { + return nil, err + } + + config.Registry = *registry + default: + return nil, fmt.Errorf("Unsupported configuration version %s Current version is %s", config.Version, CurrentVersion) + } + + switch config.Registry.LogLevel { + case "error", "warn", "info", "debug": + default: + return nil, fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", config.Registry.LogLevel) + } + + return &config, nil +} + +func parseV_0_1_Registry(registry interface{}) (*Registry, error) { + envMap := getEnvMap() + + registryBytes, err := yaml.Marshal(registry) + if err != nil { + return nil, err + } + var v_0_1 v_0_1_RegistryConfiguration + err = yaml.Unmarshal(registryBytes, &v_0_1) + if err != nil { + return nil, err + } + + if logLevel, ok := envMap["REGISTRY_LOGLEVEL"]; ok { + v_0_1.LogLevel = logLevel + } + v_0_1.LogLevel = strings.ToLower(v_0_1.LogLevel) + + var storage Storage + storage.Parameters = make(map[string]string) + + switch v_0_1.Storage.(type) { + case string: + storage.Type = v_0_1.Storage.(string) + case map[interface{}]interface{}: + storageMap := v_0_1.Storage.(map[interface{}]interface{}) + if len(storageMap) > 1 { + keys := make([]string, 0, len(storageMap)) + for key := range storageMap { + keys = append(keys, toString(key)) + } + return nil, fmt.Errorf("Must provide exactly one storage type. Provided: %v", keys) + } + var params map[interface{}]interface{} + // There will only be one key-value pair at this point + for k, v := range storageMap { + storage.Type = toString(k) + paramsMap, ok := v.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf("Must provide parameters as a map[string]string. Provided: %#v", v) + } + params = paramsMap + } + for k, v := range params { + storage.Parameters[toString(k)] = toString(v) + } + + case interface{}: + // Bad type for storage + return nil, fmt.Errorf("Registry storage must be provided by name, optionally with parameters. Provided: %v", v_0_1.Storage) + } + + if storageType, ok := envMap["REGISTRY_STORAGE"]; ok { + if storageType != storage.Type { + storage.Type = storageType + // Reset the storage parameters because we're using a different storage type + storage.Parameters = make(map[string]string) + } + } + + if storage.Type == "" { + return nil, fmt.Errorf("Must provide exactly one storage type, optionally with parameters. 
Provided: %v", v_0_1.Storage) + } + + storageParamsRegexp, err := regexp.Compile(fmt.Sprintf("^REGISTRY_STORAGE_%s_([A-Z0-9]+)$", strings.ToUpper(storage.Type))) + if err != nil { + return nil, err + } + for k, v := range envMap { + if submatches := storageParamsRegexp.FindStringSubmatch(k); submatches != nil { + storage.Parameters[strings.ToLower(submatches[1])] = v + } + } + + return &Registry{LogLevel: v_0_1.LogLevel, Storage: storage}, nil +} + +func getEnvMap() map[string]string { + envMap := make(map[string]string) + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + envMap[envParts[0]] = envParts[1] + } + return envMap +} + +func toString(v interface{}) string { + if v == nil { + return "" + } + return fmt.Sprint(v) +} diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go new file mode 100644 index 00000000..53dc43a7 --- /dev/null +++ b/configuration/configuration_test.go @@ -0,0 +1,171 @@ +package configuration + +import ( + "os" + "testing" + + "gopkg.in/yaml.v2" + + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner +func Test(t *testing.T) { TestingT(t) } + +var configStruct = Configuration{ + Version: Version{ + Major: 0, + Minor: 1, + }, + Registry: Registry{ + LogLevel: "info", + Storage: Storage{ + Type: "s3", + Parameters: map[string]string{ + "region": "us-east-1", + "bucket": "my-bucket", + "rootpath": "/registry", + "encrypt": "true", + "secure": "false", + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": "", + "port": "", + }, + }, + }, +} + +var configYamlV_0_1 = ` +version: 0.1 + +registry: + loglevel: info + storage: + s3: + region: us-east-1 + bucket: my-bucket + rootpath: /registry + encrypt: true + secure: false + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: ~ + port: ~ +` + +type ConfigSuite struct { + expectedConfig *Configuration +} + +var _ = Suite(new(ConfigSuite)) + +func (suite *ConfigSuite) SetUpTest(c *C) { + os.Clearenv() + suite.expectedConfig = copyConfig(configStruct) +} + +func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + config, err := Parse(configBytes) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseSimple(c *C) { + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { + os.Setenv("REGISTRY_STORAGE", "s3") + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") + + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { + suite.expectedConfig.Registry.Storage.Parameters["region"] = "us-west-1" + suite.expectedConfig.Registry.Storage.Parameters["secure"] = "true" + suite.expectedConfig.Registry.Storage.Parameters["newparam"] = "some Value" + + os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") + os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") + os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") + + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { + suite.expectedConfig.Registry.Storage = Storage{Type: "inmemory", Parameters: map[string]string{}} + 
+ os.Setenv("REGISTRY_STORAGE", "inmemory") + + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { + suite.expectedConfig.Registry.Storage = Storage{Type: "filesystem", Parameters: map[string]string{}} + suite.expectedConfig.Registry.Storage.Parameters["rootdirectory"] = "/tmp/testroot" + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { + os.Setenv("REGISTRY_LOGLEVEL", "info") + + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { + suite.expectedConfig.Registry.LogLevel = "error" + + os.Setenv("REGISTRY_LOGLEVEL", "error") + + config, err := Parse([]byte(configYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { + suite.expectedConfig.Version = Version{Major: CurrentVersion.Major, Minor: CurrentVersion.Minor + 1} + configBytes, err := yaml.Marshal(suite.expectedConfig) + c.Assert(err, IsNil) + _, err = Parse(configBytes) + c.Assert(err, NotNil) +} + +func copyConfig(config Configuration) *Configuration { + configCopy := new(Configuration) + + configCopy.Version = *new(Version) + configCopy.Version.Major = config.Version.Major + configCopy.Version.Minor = config.Version.Minor + + configCopy.Registry = *new(Registry) + configCopy.Registry.LogLevel = config.Registry.LogLevel + + configCopy.Registry.Storage = *new(Storage) + configCopy.Registry.Storage.Type = config.Registry.Storage.Type + configCopy.Registry.Storage.Parameters = make(map[string]string) + for k, v := range config.Registry.Storage.Parameters { + configCopy.Registry.Storage.Parameters[k] = v + } + + return configCopy +} From 2013ef5a5b7f779fd6a5a88f29ee2ed8f10aec62 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 29 Oct 2014 17:42:23 -0700 Subject: [PATCH 015/165] Adds documentation for the configuration parser and tests --- configuration/configuration.go | 69 ++++++++++++++++++++++++++--- configuration/configuration_test.go | 21 +++++++++ 2 files changed, 83 insertions(+), 7 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index 04135bbc..15481559 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -10,13 +10,19 @@ import ( "gopkg.in/yaml.v2" ) +// CurrentVersion is the most recent Version that can be parsed var CurrentVersion = Version{Major: 0, Minor: 1} +// Configuration is a versioned system configuration +// When marshaled into yaml, this produces a document matching the current version's format type Configuration struct { Version Version `yaml:"version"` Registry Registry `yaml:"registry"` } +// Version is a major/minor version pair +// Minor version upgrades should be strictly additive +// Major version upgrades indicate structure or type changes type Version struct { Major uint Minor uint @@ -26,17 +32,26 @@ func (version Version) String() string { return fmt.Sprintf("%d.%d", version.Major, version.Minor) } +// MarshalYAML is implemented to serialize the Version into a string format func 
(version Version) MarshalYAML() (interface{}, error) {
 	return version.String(), nil
 }
 
+// Registry defines the configuration for a registry
 type Registry struct {
+	// LogLevel specifies the level at which the registry will be logged
 	LogLevel string
-	Storage  Storage
+
+	// Storage specifies the configuration of the registry's object storage
+	Storage Storage
 }
 
+// Storage defines the configuration for registry object storage
 type Storage struct {
-	Type       string
+	// Type specifies the storage driver type (examples: inmemory, filesystem, s3, ...)
+	Type string
+
+	// Parameters specifies the key/value parameters map passed to the storage driver constructor
 	Parameters map[string]string
 }
 
@@ -44,16 +59,44 @@ func (storage Storage) MarshalYAML() (interface{}, error) {
 	return yaml.MapSlice{yaml.MapItem{storage.Type, storage.Parameters}}, nil
 }
 
+// untypedConfiguration is the unmarshalable configuration struct that only assumes the existence of
+// a version string parameter
+// This is done to parse the configuration version, then parse the remainder with a version-specific
+// parser
 type untypedConfiguration struct {
-	Version  string      `yaml:"version"`
+	// Version is the version string defined in a configuration yaml
+	// This can safely parse versions defined as float types in yaml
+	Version string `yaml:"version"`
+
+	// Registry is an untyped placeholder for the Registry configuration, which can later be parsed
+	// into a current Registry struct
 	Registry interface{} `yaml:"registry"`
 }
 
-type v_0_1_RegistryConfiguration struct {
-	LogLevel string      `yaml:"loglevel"`
-	Storage  interface{} `yaml:"storage"`
+// V_0_1_RegistryConfiguration is the unmarshalable Registry configuration struct specific to
+// Version{0, 1}
+type V_0_1_RegistryConfiguration struct {
+	// LogLevel is the level at which the registry will log
+	// The loglevel can be overridden with the environment variable REGISTRY_LOGLEVEL, for example:
+	// REGISTRY_LOGLEVEL=info
+	LogLevel string `yaml:"loglevel"`
+
+	// Storage is an untyped placeholder for the Storage configuration, which can later be parsed as
+	// a Storage struct
+	// The storage type can be overridden with the environment variable REGISTRY_STORAGE, for
+	// example: REGISTRY_STORAGE=s3
+	// Note: If REGISTRY_STORAGE changes the storage type, all included parameters will be ignored
+	// The storage parameters can be overridden with any environment variable of the format:
+	// REGISTRY_STORAGE_<TYPE>_<PARAMETER>, for example:
+	// REGISTRY_STORAGE_S3_BUCKET=my-bucket
+	Storage interface{} `yaml:"storage"`
 }
 
+// Parse parses an input configuration yaml document into a Configuration struct
+// This should be capable of handling old configuration format versions
+//
+// Environment variables may be used to override configuration parameters other than version, which
+// may be defined on a per-version basis. See V_0_1_RegistryConfiguration for more details
 func Parse(in []byte) (*Configuration, error) {
 	var untypedConfig untypedConfiguration
 	var config Configuration
@@ -65,6 +108,8 @@ func Parse(in []byte) (*Configuration, error) {
 	if untypedConfig.Version == "" {
 		return nil, fmt.Errorf("Please specify a configuration version. 
Current version is %s", CurrentVersion) } + + // Convert the version string from X.Y to Version{X, Y} versionParts := strings.Split(untypedConfig.Version, ".") if len(versionParts) != 2 { return nil, fmt.Errorf("Invalid version: %s Expected format: X.Y", untypedConfig.Version) @@ -79,6 +124,7 @@ func Parse(in []byte) (*Configuration, error) { } config.Version = Version{Major: uint(majorVersion), Minor: uint(minorVersion)} + // Parse the remainder of the configuration depending on the provided version switch config.Version { case Version{0, 1}: registry, err := parseV_0_1_Registry(untypedConfig.Registry) @@ -100,6 +146,7 @@ func Parse(in []byte) (*Configuration, error) { return &config, nil } +// parseV_0_1_Registry parses a Registry configuration for Version{0, 1} func parseV_0_1_Registry(registry interface{}) (*Registry, error) { envMap := getEnvMap() @@ -107,7 +154,7 @@ func parseV_0_1_Registry(registry interface{}) (*Registry, error) { if err != nil { return nil, err } - var v_0_1 v_0_1_RegistryConfiguration + var v_0_1 V_0_1_RegistryConfiguration err = yaml.Unmarshal(registryBytes, &v_0_1) if err != nil { return nil, err @@ -123,8 +170,10 @@ func parseV_0_1_Registry(registry interface{}) (*Registry, error) { switch v_0_1.Storage.(type) { case string: + // Storage is provided only by type storage.Type = v_0_1.Storage.(string) case map[interface{}]interface{}: + // Storage is provided as a {type: parameters} map storageMap := v_0_1.Storage.(map[interface{}]interface{}) if len(storageMap) > 1 { keys := make([]string, 0, len(storageMap)) @@ -136,6 +185,8 @@ func parseV_0_1_Registry(registry interface{}) (*Registry, error) { var params map[interface{}]interface{} // There will only be one key-value pair at this point for k, v := range storageMap { + // Parameters may be parsed as numerical or boolean values, so just convert these to + // strings storage.Type = toString(k) paramsMap, ok := v.(map[interface{}]interface{}) if !ok { @@ -164,6 +215,8 @@ func parseV_0_1_Registry(registry interface{}) (*Registry, error) { return nil, fmt.Errorf("Must provide exactly one storage type, optionally with parameters. 
Provided: %v", v_0_1.Storage)
 	}
 
+	// Find all environment variables of the format:
+	// REGISTRY_STORAGE_<TYPE>_<PARAMETER>
 	storageParamsRegexp, err := regexp.Compile(fmt.Sprintf("^REGISTRY_STORAGE_%s_([A-Z0-9]+)$", strings.ToUpper(storage.Type)))
 	if err != nil {
 		return nil, err
@@ -177,6 +230,7 @@ func parseV_0_1_Registry(registry interface{}) (*Registry, error) {
 	return &Registry{LogLevel: v_0_1.LogLevel, Storage: storage}, nil
 }
 
+// getEnvMap reads the current environment variables and converts these into a key/value map
 func getEnvMap() map[string]string {
 	envMap := make(map[string]string)
 	for _, env := range os.Environ() {
@@ -186,6 +240,7 @@ func getEnvMap() map[string]string {
 	return envMap
 }
 
+// toString converts reasonable objects into strings that may be used for configuration parameters
 func toString(v interface{}) string {
 	if v == nil {
 		return ""
diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go
index 53dc43a7..8be767fb 100644
--- a/configuration/configuration_test.go
+++ b/configuration/configuration_test.go
@@ -12,6 +12,7 @@ import (
 // Hook up gocheck into the "go test" runner
 func Test(t *testing.T) { TestingT(t) }
 
+// configStruct is a canonical example configuration, which should map to configYamlV_0_1
 var configStruct = Configuration{
 	Version: Version{
 		Major: 0,
@@ -36,6 +37,7 @@ var configStruct = Configuration{
 	},
 }
 
+// configYamlV_0_1 is a Version{0, 1} yaml document representing configStruct
 var configYamlV_0_1 = `
 version: 0.1
 
@@ -65,6 +67,8 @@ func (suite *ConfigSuite) SetUpTest(c *C) {
 	suite.expectedConfig = copyConfig(configStruct)
 }
 
+// TestMarshalRoundtrip validates that configStruct can be marshaled and unmarshaled without
+// changing any parameters
 func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) {
 	configBytes, err := yaml.Marshal(suite.expectedConfig)
 	c.Assert(err, IsNil)
@@ -73,12 +77,15 @@ func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) {
 	c.Assert(config, DeepEquals, suite.expectedConfig)
 }
 
+// TestParseSimple validates that configYamlV_0_1 can be parsed into a struct matching configStruct
 func (suite *ConfigSuite) TestParseSimple(c *C) {
 	config, err := Parse([]byte(configYamlV_0_1))
 	c.Assert(err, IsNil)
 	c.Assert(config, DeepEquals, suite.expectedConfig)
 }
 
+// TestParseWithSameEnvStorage validates that providing environment variables that match the given
+// storage type and parameters will not alter the parsed Configuration struct
 func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) {
 	os.Setenv("REGISTRY_STORAGE", "s3")
 	os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1")
@@ -88,6 +95,9 @@ func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) {
 	c.Assert(config, DeepEquals, suite.expectedConfig)
 }
 
+// TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
+// and add to the given storage parameters will change and add parameters to the parsed
+// Configuration struct
 func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
 	suite.expectedConfig.Registry.Storage.Parameters["region"] = "us-west-1"
 	suite.expectedConfig.Registry.Storage.Parameters["secure"] = "true"
@@ -102,6 +112,8 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) {
 	c.Assert(config, DeepEquals, suite.expectedConfig)
 }
 
+// TestParseWithDifferentEnvStorageType validates that providing an environment variable that
+// changes the storage type will be reflected in the parsed Configuration struct
 func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) {
suite.expectedConfig.Registry.Storage = Storage{Type: "inmemory", Parameters: map[string]string{}} @@ -112,6 +124,9 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +// TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable +// that changes the storage type will be reflected in the parsed Configuration struct and that +// environment storage parameters will also be included func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { suite.expectedConfig.Registry.Storage = Storage{Type: "filesystem", Parameters: map[string]string{}} suite.expectedConfig.Registry.Storage.Parameters["rootdirectory"] = "/tmp/testroot" @@ -124,6 +139,8 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +// TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log +// level to the same as the one provided in the yaml will not change the parsed Configuration struct func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { os.Setenv("REGISTRY_LOGLEVEL", "info") @@ -132,6 +149,8 @@ func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +// TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the +// log level will override the value provided in the yaml document func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { suite.expectedConfig.Registry.LogLevel = "error" @@ -142,6 +161,8 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +// TestParseInvalidVersion validates that the parser will fail to parse a newer configuration +// version than the CurrentVersion func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { suite.expectedConfig.Version = Version{Major: CurrentVersion.Major, Minor: CurrentVersion.Minor + 1} configBytes, err := yaml.Marshal(suite.expectedConfig) From 2b51a8ab439cec229ce2adcd888a73c4657efa7f Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 30 Oct 2014 16:27:22 -0700 Subject: [PATCH 016/165] Adds README.md to the configuration package --- configuration/README.md | 80 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 configuration/README.md diff --git a/configuration/README.md b/configuration/README.md new file mode 100644 index 00000000..edc8eccf --- /dev/null +++ b/configuration/README.md @@ -0,0 +1,80 @@ +Docker-Registry Configuration +============================= + +This document describes the registry configuration model and how to specify a custom configuration with a configuration file and/or environment variables. + +Semantic-ish Versioning +----------------------- + +The configuration file is designed with versioning in mind, such that most upgrades will not require a change in configuration files, and such that configuration files can be "upgraded" from one version to another. + +The version is specified as a string of the form `MajorVersion.MinorVersion`, where MajorVersion and MinorVersion are both non-negative integer values. 
Much like [semantic versioning](http://semver.org/), minor version increases denote inherently backwards-compatible changes, such as the addition of optional fields, whereas major version increases denote a restructuring, such as renaming fields or adding required fields. Because of the explicit version definition in the configuration file, it should be possible to parse old configuration files and port them to the current configuration version, although this is not guaranteed for all future versions.
+
+File Structure (as of Version 0.1)
+------------------------------------
+
+The configuration structure is defined in `configuration.go`, and is best described by the following two examples:
+
+```yaml
+version: 0.1
+
+registry:
+  loglevel: info
+  storage:
+    s3:
+      region: us-east-1
+      bucket: my-bucket
+      rootpath: /registry
+      encrypt: true
+      secure: false
+      accesskey: SAMPLEACCESSKEY
+      secretkey: SUPERSECRET
+      host: ~
+      port: ~
+```
+
+```yaml
+version: 0.1
+
+registry:
+  loglevel: debug
+  storage: inmemory
+```
+
+### version
+The version is expected to remain a top-level field, as to allow for a consistent version check before parsing the remainder of the configuration file.
+
+### registry
+The registry configuration consists of two fields: `loglevel` and `storage`
+
+#### loglevel
+This specifies the log level of the registry.
+
+Supported values:
+* `error`
+* `warn`
+* `info`
+* `debug`
+
+#### storage
+This specifies the storage driver, and may be provided either as a string (only the driver type) or as a driver name with a parameters map, as seen in the first example above.
+
+The parameters map will be passed into the factory constructor of the given storage driver type.
+
+### Notes
+
+All keys in the configuration file **must** be provided as a string of lowercase letters and numbers only, and values must be string-like (booleans and numerical values are fine to parse as strings).
+
+Environment Variables
+---------------------
+
+To support the workflow of running a docker registry from a standard container without having to modify configuration files, the registry configuration also supports environment variables for overriding fields.
+
+Any field that is a descendant of `registry` can be replaced by providing an environment variable of the following form: `REGISTRY_<uppercase field>[_<uppercase field>]...`.
+
+For example, to change the loglevel to `error`, one can provide `REGISTRY_LOGLEVEL=error`, and to change the s3 storage driver's region parameter to `us-west-1`, one can provide `REGISTRY_STORAGE_S3_REGION=us-west-1`.
+
+### Notes
+If an environment variable changes a map value into a string, such as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then all sub-fields will be erased. As such, changing the storage type will remove all parameters related to the old storage type.
+
+By restricting all keys in the configuration file to lowercase letters and numbers, we can avoid any potential environment variable mapping ambiguity.
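To tie the file format and the parser together, a minimal consumer of this configuration package might look like the following sketch; the `github.com/docker/docker-registry/configuration` import path and the config file location are assumptions made for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/docker-registry/configuration" // assumed import path
)

func main() {
	// Environment variables override the yaml document, as described above
	os.Setenv("REGISTRY_LOGLEVEL", "debug")

	configBytes, err := ioutil.ReadFile("config.yml") // assumed location
	if err != nil {
		panic(err)
	}

	config, err := configuration.Parse(configBytes)
	if err != nil {
		panic(err)
	}

	fmt.Println(config.Registry.LogLevel)     // "debug", taken from the environment
	fmt.Println(config.Registry.Storage.Type) // whichever driver the yaml selected
}
```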
From 96d26842f8e251743c496e8ff8db22cae9752391 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Tue, 4 Nov 2014 09:41:32 -0800
Subject: [PATCH 017/165] Refactors configuration parser, removes Registry level from config file

Most conditional parsing code has been moved into UnmarshalYAML functions for simplicity

Uses the BrianBland fork of goyaml in configuration.go temporarily until fix https://github.com/go-yaml/yaml/pull/52 is merged in

---
 configuration/README.md             |  43 ++--
 configuration/configuration.go      | 335 +++++++++++++++-------------
 configuration/configuration_test.go | 109 ++++-----
 3 files changed, 250 insertions(+), 237 deletions(-)

diff --git a/configuration/README.md b/configuration/README.md
index edc8eccf..03ac8ab3 100644
--- a/configuration/README.md
+++ b/configuration/README.md
@@ -13,41 +13,34 @@ The version is specified as a string of the form `MajorVersion.MinorVersion`, wh
 File Structure (as of Version 0.1)
 ------------------------------------
 
-The configuration structure is defined in `configuration.go`, and is best described by the following two examples:
+The configuration structure is defined by the `Configuration` struct in `configuration.go`, and is best described by the following two examples:
 
 ```yaml
 version: 0.1
-
-registry:
-  loglevel: info
-  storage:
-    s3:
-      region: us-east-1
-      bucket: my-bucket
-      rootpath: /registry
-      encrypt: true
-      secure: false
-      accesskey: SAMPLEACCESSKEY
-      secretkey: SUPERSECRET
-      host: ~
-      port: ~
+loglevel: info
+storage:
+  s3:
+    region: us-east-1
+    bucket: my-bucket
+    rootpath: /registry
+    encrypt: true
+    secure: false
+    accesskey: SAMPLEACCESSKEY
+    secretkey: SUPERSECRET
+    host: ~
+    port: ~
 ```
 
 ```yaml
 version: 0.1
-
-registry:
-  loglevel: debug
-  storage: inmemory
+loglevel: debug
+storage: inmemory
 ```
 
 ### version
 The version is expected to remain a top-level field, as to allow for a consistent version check before parsing the remainder of the configuration file.
 
-### registry
-The registry configuration consists of two fields: `loglevel` and `storage`
-
-#### loglevel
+### loglevel
 This specifies the log level of the registry.
 
 Supported values:
@@ -56,7 +49,7 @@ Supported values:
 * `info`
 * `debug`
 
-#### storage
+### storage
 This specifies the storage driver, and may be provided either as a string (only the driver type) or as a driver name with a parameters map, as seen in the first example above.
 
 The parameters map will be passed into the factory constructor of the given storage driver type.
@@ -70,7 +63,7 @@ Environment Variables
 
 To support the workflow of running a docker registry from a standard container without having to modify configuration files, the registry configuration also supports environment variables for overriding fields.
 
-Any field that is a descendant of `registry` can be replaced by providing an environment variable of the following form: `REGISTRY_<uppercase field>[_<uppercase field>]...`.
+Any configuration field other than version can be replaced by providing an environment variable of the following form: `REGISTRY_<uppercase field>[_<uppercase field>]...`.
 
 For example, to change the loglevel to `error`, one can provide `REGISTRY_LOGLEVEL=error`, and to change the s3 storage driver's region parameter to `us-west-1`, one can provide `REGISTRY_STORAGE_S3_REGION=us-west-1`.
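Once the registry level is removed, the same consumer addresses fields at the top of the Configuration struct. A sketch, assuming the `Loglevel` field and the `Storage.Type()` accessor introduced in the configuration.go diff that follows:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-registry/configuration" // assumed import path
)

func main() {
	configYaml := []byte("version: 0.1\nloglevel: debug\nstorage: inmemory\n")

	config, err := configuration.Parse(configYaml)
	if err != nil {
		panic(err)
	}

	fmt.Println(config.Loglevel)       // "debug" (now a top-level field)
	fmt.Println(config.Storage.Type()) // "inmemory" (Storage is a single-entry map)
}
```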
diff --git a/configuration/configuration.go b/configuration/configuration.go index 15481559..901a2571 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -7,99 +7,182 @@ import ( "strconv" "strings" - "gopkg.in/yaml.v2" + "gopkg.in/BrianBland/yaml.v2" ) -// CurrentVersion is the most recent Version that can be parsed -var CurrentVersion = Version{Major: 0, Minor: 1} - -// Configuration is a versioned system configuration -// When marshaled into yaml, this produces a document matching the current version's format +// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and +// optionally modified by environment variables type Configuration struct { + // Version is the version which defines the format of the rest of the configuration Version Version `yaml:"version"` - Registry Registry `yaml:"registry"` + + // Loglevel is the level at which registry operations are logged + Loglevel Loglevel `yaml:"loglevel"` + + // Storage is the configuration for the registry's storage driver + Storage Storage `yaml:"storage"` } -// Version is a major/minor version pair -// Minor version upgrades should be strictly additive +// v_0_1_Configuration is a Version 0.1 Configuration struct +// This is currently aliased to Configuration, as it is the current version +type v_0_1_Configuration Configuration + +// Version is a major/minor version pair of the form Major.Minor // Major version upgrades indicate structure or type changes -type Version struct { - Major uint - Minor uint +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) } -func (version Version) String() string { - return fmt.Sprintf("%d.%d", version.Major, version.Minor) +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err } -// MarshalYAML is implemented to serialize the Version into a string format -func (version Version) MarshalYAML() (interface{}, error) { - return version.String(), nil +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major } -// Registry defines the configuration for a registry -type Registry struct { - // LogLevel specifies the level at which the registry will be logged - LogLevel string +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} - // Storage specifies the configuration of the registry's object storage - Storage Storage +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return minor +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints +func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var versionString string + err := unmarshal(&versionString) + if err != nil { + return err + } + + newVersion := Version(versionString) + if _, err := newVersion.major(); err != nil { + return err + } + + if _, err := newVersion.minor(); err != nil { + return err + } + + *version = newVersion + return nil 
+}
+
+// CurrentVersion is the most recent Version that can be parsed
+var CurrentVersion = MajorMinorVersion(0, 1)
+
+// Loglevel is the level at which operations are logged
+// This can be error, warn, info, or debug
+type Loglevel string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
+// valid loglevel
+func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var loglevelString string
+	err := unmarshal(&loglevelString)
+	if err != nil {
+		return err
+	}
+
+	loglevelString = strings.ToLower(loglevelString)
+	switch loglevelString {
+	case "error", "warn", "info", "debug":
+	default:
+		return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
+	}
+
+	*loglevel = Loglevel(loglevelString)
+	return nil
+}
 
 // Storage defines the configuration for registry object storage
-type Storage struct {
-	// Type specifies the storage driver type (examples: inmemory, filesystem, s3, ...)
-	Type string
+type Storage map[string]Parameters
 
-	// Parameters specifies the key/value parameters map passed to the storage driver constructor
-	Parameters map[string]string
-}
+// Type returns the storage driver type, such as filesystem or s3
+func (storage Storage) Type() string {
+	// Return only key in this map
+	for k := range storage {
+		return k
+	}
+	return ""
+}
 
+// Parameters returns the Parameters map for a Storage configuration
+func (storage Storage) Parameters() Parameters {
+	return storage[storage.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (storage Storage) setParameter(key, value string) {
+	storage[storage.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
+func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var storageMap map[string]Parameters
+	err := unmarshal(&storageMap)
+	if err == nil {
+		if len(storageMap) > 1 {
+			types := make([]string, 0, len(storageMap))
+			for k := range storageMap {
+				types = append(types, k)
+			}
+			return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types)
+		}
+		*storage = storageMap
+		return nil
+	}
+
+	var storageType string
+	err = unmarshal(&storageType)
+	if err == nil {
+		*storage = Storage{storageType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
 func (storage Storage) MarshalYAML() (interface{}, error) {
-	return yaml.MapSlice{yaml.MapItem{storage.Type, storage.Parameters}}, nil
+	if storage.Parameters() == nil {
+		return storage.Type(), nil
+	}
+	return map[string]Parameters(storage), nil
 }
 
-// untypedConfiguration is the unmarshalable configuration struct that only assumes the existence of
-// a version string parameter
-// This is done to parse the configuration version, then parse the remainder with a version-specific
-// parser
-type untypedConfiguration struct {
-	// Version is the version string defined in a configuration yaml
-	// This can safely parse versions defined as float types in yaml
-	Version string `yaml:"version"`
-
-	// Registry is an untyped placeholder for the Registry configuration, which can later be parsed
-	// into a current Registry struct
-	Registry interface{} `yaml:"registry"`
-}
-
-// V_0_1_RegistryConfiguration is the unmarshalable Registry configuration struct specific to
-// Version{0, 1}
-type V_0_1_RegistryConfiguration struct {
-	// LogLevel is the level at which the registry will log
-	// The loglevel can be overridden with the environment variable REGISTRY_LOGLEVEL, for example:
-	// REGISTRY_LOGLEVEL=info
-	LogLevel string `yaml:"loglevel"`
-
-	// Storage is an untyped placeholder for the Storage configuration, which can later be parsed as
-	// a Storage struct
-	// The storage type can be overridden with the environment variable REGISTRY_STORAGE, for
-	// example: REGISTRY_STORAGE=s3
-	// Note: If REGISTRY_STORAGE changes the storage type, all included parameters will be ignored
-	// The storage parameters can be overridden with any environment variable of the format:
-	// REGISTRY_STORAGE_<TYPE>_<PARAMETER>, for example:
-	// REGISTRY_STORAGE_S3_BUCKET=my-bucket
-	Storage interface{} `yaml:"storage"`
-}
+// Parameters defines a key-value parameters mapping
+type Parameters map[string]string
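+
+// For example, given the UnmarshalYAML and accessors above, both of the
+// following yaml fragments produce a Storage whose Type() is "inmemory" with
+// empty Parameters():
+//
+//	storage: inmemory
+//
+//	storage:
+//	  inmemory: {}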
Provided: %v", types) + } + *storage = storageMap + return nil + } + + var storageType string + err = unmarshal(&storageType) + if err == nil { + *storage = Storage{storageType: Parameters{}} + return nil + } + + return err +} + +// MarshalYAML implements the yaml.Marshaler interface func (storage Storage) MarshalYAML() (interface{}, error) { - return yaml.MapSlice{yaml.MapItem{storage.Type, storage.Parameters}}, nil + if storage.Parameters == nil { + return storage.Type, nil + } + return map[string]Parameters(storage), nil } -// untypedConfiguration is the unmarshalable configuration struct that only assumes the existence of -// a version string parameter -// This is done to parse the configuration version, then parse the remainder with a version-specific -// parser -type untypedConfiguration struct { - // Version is the version string defined in a configuration yaml - // This can safely parse versions defined as float types in yaml - Version string `yaml:"version"` - - // Registry is an untyped placeholder for the Registry configuration, which can later be parsed - // into a current Registry struct - Registry interface{} `yaml:"registry"` -} - -// V_0_1_RegistryConfiguration is the unmarshalable Registry configuration struct specific to -// Version{0, 1} -type V_0_1_RegistryConfiguration struct { - // LogLevel is the level at which the registry will log - // The loglevel can be overridden with the environment variable REGISTRY_LOGLEVEL, for example: - // REGISTRY_LOGLEVEL=info - LogLevel string `yaml:"loglevel"` - - // Storage is an untyped placeholder for the Storage configuration, which can later be parsed as - // a Storage struct - // The storage type can be overridden with the environment variable REGISTRY_STORAGE, for - // example: REGISTRY_STORAGE=s3 - // Note: If REGISTRY_STORAGE changes the storage type, all included parameters will be ignored - // The storage parameters can be overridden with any environment variable of the format: - // REGISTRY_STORAGE__, for example: - // REGISTRY_STORAGE_S3_BUCKET=my-bucket - Storage interface{} `yaml:"storage"` -} +// Parameters defines a key-value parameters mapping +type Parameters map[string]string // Parse parses an input configuration yaml document into a Configuration struct -// This should be capable of handling old configuration format versions +// This should generally be capable of handling old configuration format versions // -// Environment variables may be used to override configuration parameters other than version, which -// may be defined on a per-version basis. See V_0_1_RegistryConfiguration for more details +// Environment variables may be used to override configuration parameters other than version, +// following the scheme below: +// Configuration.Abc may be replaced by the value of REGISTRY_ABC, +// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth func Parse(in []byte) (*Configuration, error) { - var untypedConfig untypedConfiguration - var config Configuration + var untypedConfig struct { + Version Version + } + var config *Configuration err := yaml.Unmarshal(in, &untypedConfig) if err != nil { @@ -109,128 +192,70 @@ func Parse(in []byte) (*Configuration, error) { return nil, fmt.Errorf("Please specify a configuration version. 
Current version is %s", CurrentVersion) } - // Convert the version string from X.Y to Version{X, Y} - versionParts := strings.Split(untypedConfig.Version, ".") - if len(versionParts) != 2 { - return nil, fmt.Errorf("Invalid version: %s Expected format: X.Y", untypedConfig.Version) - } - majorVersion, err := strconv.ParseUint(versionParts[0], 10, 0) - if err != nil { - return nil, fmt.Errorf("Major version must be of type uint, received %v", versionParts[0]) - } - minorVersion, err := strconv.ParseUint(versionParts[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("Minor version must be of type uint, received %v", versionParts[1]) - } - config.Version = Version{Major: uint(majorVersion), Minor: uint(minorVersion)} - // Parse the remainder of the configuration depending on the provided version - switch config.Version { - case Version{0, 1}: - registry, err := parseV_0_1_Registry(untypedConfig.Registry) + switch untypedConfig.Version { + case "0.1": + config, err = parseV_0_1_Registry(in) if err != nil { return nil, err } - - config.Registry = *registry default: - return nil, fmt.Errorf("Unsupported configuration version %s Current version is %s", config.Version, CurrentVersion) + return nil, fmt.Errorf("Unsupported configuration version %s Current version is %s", untypedConfig.Version, CurrentVersion) } - switch config.Registry.LogLevel { - case "error", "warn", "info", "debug": - default: - return nil, fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", config.Registry.LogLevel) - } - - return &config, nil + return config, nil } -// parseV_0_1_Registry parses a Registry configuration for Version{0, 1} -func parseV_0_1_Registry(registry interface{}) (*Registry, error) { +// parseV_0_1_Registry parses a registry Configuration for Version 0.1 +func parseV_0_1_Registry(in []byte) (*Configuration, error) { envMap := getEnvMap() - registryBytes, err := yaml.Marshal(registry) - if err != nil { - return nil, err - } - var v_0_1 V_0_1_RegistryConfiguration - err = yaml.Unmarshal(registryBytes, &v_0_1) + var config v_0_1_Configuration + err := yaml.Unmarshal(in, &config) if err != nil { return nil, err } - if logLevel, ok := envMap["REGISTRY_LOGLEVEL"]; ok { - v_0_1.LogLevel = logLevel - } - v_0_1.LogLevel = strings.ToLower(v_0_1.LogLevel) - - var storage Storage - storage.Parameters = make(map[string]string) - - switch v_0_1.Storage.(type) { - case string: - // Storage is provided only by type - storage.Type = v_0_1.Storage.(string) - case map[interface{}]interface{}: - // Storage is provided as a {type: parameters} map - storageMap := v_0_1.Storage.(map[interface{}]interface{}) - if len(storageMap) > 1 { - keys := make([]string, 0, len(storageMap)) - for key := range storageMap { - keys = append(keys, toString(key)) - } - return nil, fmt.Errorf("Must provide exactly one storage type. Provided: %v", keys) + // Override config.Loglevel if environment variable is provided + if loglevel, ok := envMap["REGISTRY_LOGLEVEL"]; ok { + var newLoglevel Loglevel + err := yaml.Unmarshal([]byte(loglevel), &newLoglevel) + if err != nil { + return nil, err } - var params map[interface{}]interface{} - // There will only be one key-value pair at this point - for k, v := range storageMap { - // Parameters may be parsed as numerical or boolean values, so just convert these to - // strings - storage.Type = toString(k) - paramsMap, ok := v.(map[interface{}]interface{}) - if !ok { - return nil, fmt.Errorf("Must provide parameters as a map[string]string. 
Provided: %#v", v) - } - params = paramsMap - } - for k, v := range params { - storage.Parameters[toString(k)] = toString(v) - } - - case interface{}: - // Bad type for storage - return nil, fmt.Errorf("Registry storage must be provided by name, optionally with parameters. Provided: %v", v_0_1.Storage) + config.Loglevel = newLoglevel } + // Override config.Storage if environment variable is provided if storageType, ok := envMap["REGISTRY_STORAGE"]; ok { - if storageType != storage.Type { - storage.Type = storageType + if storageType != config.Storage.Type() { // Reset the storage parameters because we're using a different storage type - storage.Parameters = make(map[string]string) + config.Storage = Storage{storageType: Parameters{}} } } - if storage.Type == "" { - return nil, fmt.Errorf("Must provide exactly one storage type, optionally with parameters. Provided: %v", v_0_1.Storage) + if config.Storage.Type() == "" { + return nil, fmt.Errorf("Must provide exactly one storage type, optionally with parameters. Provided: %v", config.Storage) } - // Find all environment variables of the format: + // Override storage parameters with all environment variables of the format: // REGISTRY_STORAGE__ - storageParamsRegexp, err := regexp.Compile(fmt.Sprintf("^REGISTRY_STORAGE_%s_([A-Z0-9]+)$", strings.ToUpper(storage.Type))) + storageParamsRegexp, err := regexp.Compile(fmt.Sprintf("^REGISTRY_STORAGE_%s_([A-Z0-9]+)$", strings.ToUpper(config.Storage.Type()))) if err != nil { return nil, err } for k, v := range envMap { if submatches := storageParamsRegexp.FindStringSubmatch(k); submatches != nil { - storage.Parameters[strings.ToLower(submatches[1])] = v + config.Storage.setParameter(strings.ToLower(submatches[1]), v) } } - return &Registry{LogLevel: v_0_1.LogLevel, Storage: storage}, nil + return (*Configuration)(&config), nil } // getEnvMap reads the current environment variables and converts these into a key/value map +// This is used to distinguish between empty strings returned by os.GetEnv(key) because of undefined +// environment variables and explicitly empty ones func getEnvMap() map[string]string { envMap := make(map[string]string) for _, env := range os.Environ() { @@ -239,11 +264,3 @@ func getEnvMap() map[string]string { } return envMap } - -// toString converts reasonable objects into strings that may be used for configuration parameters -func toString(v interface{}) string { - if v == nil { - return "" - } - return fmt.Sprint(v) -} diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 8be767fb..cde679e2 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -14,46 +14,46 @@ func Test(t *testing.T) { TestingT(t) } // configStruct is a canonical example configuration, which should map to configYamlV_0_1 var configStruct = Configuration{ - Version: Version{ - Major: 0, - Minor: 1, - }, - Registry: Registry{ - LogLevel: "info", - Storage: Storage{ - Type: "s3", - Parameters: map[string]string{ - "region": "us-east-1", - "bucket": "my-bucket", - "rootpath": "/registry", - "encrypt": "true", - "secure": "false", - "accesskey": "SAMPLEACCESSKEY", - "secretkey": "SUPERSECRET", - "host": "", - "port": "", - }, + Version: "0.1", + Loglevel: "info", + Storage: Storage{ + "s3": Parameters{ + "region": "us-east-1", + "bucket": "my-bucket", + "rootpath": "/registry", + "encrypt": "true", + "secure": "false", + "accesskey": "SAMPLEACCESSKEY", + "secretkey": "SUPERSECRET", + "host": "", + "port": "", }, }, } -// configYamlV_0_1 
is a Version{0, 1} yaml document representing configStruct +// configYamlV_0_1 is a Version 0.1 yaml document representing configStruct var configYamlV_0_1 = ` version: 0.1 +loglevel: info +storage: + s3: + region: us-east-1 + bucket: my-bucket + rootpath: /registry + encrypt: true + secure: false + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: ~ + port: ~ +` -registry: - loglevel: info - storage: - s3: - region: us-east-1 - bucket: my-bucket - rootpath: /registry - encrypt: true - secure: false - accesskey: SAMPLEACCESSKEY - secretkey: SUPERSECRET - host: ~ - port: ~ +// inmemoryConfigYamlV_0_1 is a Version 0.1 yaml document specifying an inmemory storage driver with +// no parameters +var inmemoryConfigYamlV_0_1 = ` +version: 0.1 +loglevel: info +storage: inmemory ` type ConfigSuite struct { @@ -84,6 +84,16 @@ func (suite *ConfigSuite) TestParseSimple(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +// TestParseInmemory validates that configuration yaml with storage provided as a string can be +// parsed into a Configuration struct with no storage parameters +func (suite *ConfigSuite) TestParseInmemory(c *C) { + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + + config, err := Parse([]byte(inmemoryConfigYamlV_0_1)) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + // TestParseWithSameEnvStorage validates that providing environment variables that match the given // storage type and parameters will not alter the parsed Configuration struct func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { @@ -99,9 +109,9 @@ func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { // and add to the given storage parameters will change and add parameters to the parsed // Configuration struct func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { - suite.expectedConfig.Registry.Storage.Parameters["region"] = "us-west-1" - suite.expectedConfig.Registry.Storage.Parameters["secure"] = "true" - suite.expectedConfig.Registry.Storage.Parameters["newparam"] = "some Value" + suite.expectedConfig.Storage.setParameter("region", "us-west-1") + suite.expectedConfig.Storage.setParameter("secure", "true") + suite.expectedConfig.Storage.setParameter("newparam", "some Value") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") @@ -115,7 +125,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { // TestParseWithDifferentEnvStorageType validates that providing an environment variable that // changes the storage type will be reflected in the parsed Configuration struct func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { - suite.expectedConfig.Registry.Storage = Storage{Type: "inmemory", Parameters: map[string]string{}} + suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} os.Setenv("REGISTRY_STORAGE", "inmemory") @@ -128,8 +138,8 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { // that changes the storage type will be reflected in the parsed Configuration struct and that // environment storage parameters will also be included func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { - suite.expectedConfig.Registry.Storage = Storage{Type: "filesystem", Parameters: map[string]string{}} - suite.expectedConfig.Registry.Storage.Parameters["rootdirectory"] = "/tmp/testroot" + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} + 
suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") os.Setenv("REGISTRY_STORAGE", "filesystem") os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") @@ -152,7 +162,7 @@ func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { // TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the // log level will override the value provided in the yaml document func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { - suite.expectedConfig.Registry.LogLevel = "error" + suite.expectedConfig.Loglevel = "error" os.Setenv("REGISTRY_LOGLEVEL", "error") @@ -164,7 +174,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { // TestParseInvalidVersion validates that the parser will fail to parse a newer configuration // version than the CurrentVersion func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { - suite.expectedConfig.Version = Version{Major: CurrentVersion.Major, Minor: CurrentVersion.Minor + 1} + suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) _, err = Parse(configBytes) @@ -174,18 +184,11 @@ func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { func copyConfig(config Configuration) *Configuration { configCopy := new(Configuration) - configCopy.Version = *new(Version) - configCopy.Version.Major = config.Version.Major - configCopy.Version.Minor = config.Version.Minor - - configCopy.Registry = *new(Registry) - configCopy.Registry.LogLevel = config.Registry.LogLevel - - configCopy.Registry.Storage = *new(Storage) - configCopy.Registry.Storage.Type = config.Registry.Storage.Type - configCopy.Registry.Storage.Parameters = make(map[string]string) - for k, v := range config.Registry.Storage.Parameters { - configCopy.Registry.Storage.Parameters[k] = v + configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) + configCopy.Loglevel = config.Loglevel + configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} + for k, v := range config.Storage.Parameters() { + configCopy.Storage.setParameter(k, v) } return configCopy From 1ae548599828ca755cf5a2971adc4e5e2657fd8e Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 6 Nov 2014 12:16:14 -0800 Subject: [PATCH 018/165] Adds versioning for out-of-process storage driver The registry currently only accepts storage driver versions with the same major version and an equal or lower minor version as its own current storage driver api version, but this may be changed in the future if we decide to implement specific version cross-compatibility. --- storagedriver/README.md | 2 ++ storagedriver/ipc/client.go | 66 +++++++++++++++++++++++++--------- storagedriver/ipc/ipc.go | 26 ++++++++++++++ storagedriver/ipc/server.go | 5 +++ storagedriver/storagedriver.go | 24 +++++++++++++ 5 files changed, 106 insertions(+), 17 deletions(-) diff --git a/storagedriver/README.md b/storagedriver/README.md index f2795834..387e245b 100644 --- a/storagedriver/README.md +++ b/storagedriver/README.md @@ -40,6 +40,8 @@ Storage drivers should call `factory.Register` with their driver name in an `ini ### Out-of-process drivers As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `main/storagedriver/filesystem/filesystem.go`. 
Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver. +Out-of-process drivers must also implement the `ipc.IPCStorageDriver` interface, which exposes a `Version` check for the storage driver. This is used to validate storage driver api compatibility at driver load-time. + ## Testing Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively. diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index fd5f15c3..f4d5f49e 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -11,6 +11,7 @@ import ( "path" "syscall" + "github.com/docker/docker-registry/storagedriver" "github.com/docker/libchan" "github.com/docker/libchan/spdy" ) @@ -22,6 +23,7 @@ type StorageDriverClient struct { socket *os.File transport *spdy.Transport sender libchan.Sender + version storagedriver.Version } // NewDriverClient constructs a new out-of-process storage driver using the driver name and @@ -65,42 +67,62 @@ func (driver *StorageDriverClient) Start() error { } childSocket := os.NewFile(uintptr(fileDescriptors[0]), "childSocket") - parentSocket := os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") + driver.socket = os.NewFile(uintptr(fileDescriptors[1]), "parentSocket") driver.subprocess.Stdout = os.Stdout driver.subprocess.Stderr = os.Stderr driver.subprocess.ExtraFiles = []*os.File{childSocket} if err = driver.subprocess.Start(); err != nil { - parentSocket.Close() + driver.Stop() return err } if err = childSocket.Close(); err != nil { - parentSocket.Close() + driver.Stop() return err } - connection, err := net.FileConn(parentSocket) + connection, err := net.FileConn(driver.socket) if err != nil { - parentSocket.Close() + driver.Stop() return err } - transport, err := spdy.NewClientTransport(connection) + driver.transport, err = spdy.NewClientTransport(connection) if err != nil { - parentSocket.Close() + driver.Stop() return err } - sender, err := transport.NewSendChannel() + driver.sender, err = driver.transport.NewSendChannel() if err != nil { - transport.Close() - parentSocket.Close() + driver.Stop() return err } - driver.socket = parentSocket - driver.transport = transport - driver.sender = sender + // Check the driver's version to determine compatibility + receiver, remoteSender := libchan.Pipe() + err = driver.sender.Send(&Request{Type: "Version", ResponseChannel: remoteSender}) + if err != nil { + driver.Stop() + return err + } + + var response VersionResponse + err = receiver.Receive(&response) + if err != nil { + driver.Stop() + return err + } + + if response.Error != nil { + return response.Error + } + + driver.version = response.Version + + if driver.version.Major() != storagedriver.CurrentVersion.Major() || driver.version.Minor() > storagedriver.CurrentVersion.Minor() { + return IncompatibleVersionError{driver.version} + } return nil } @@ -108,10 +130,20 @@ func (driver *StorageDriverClient) Start() error { // Stop stops the child process storage driver // storagedriver.StorageDriver methods called after Stop will fail func (driver *StorageDriverClient) Stop() error { - closeSenderErr := driver.sender.Close() - 
closeTransportErr := driver.transport.Close()
-	closeSocketErr := driver.socket.Close()
-	killErr := driver.subprocess.Process.Kill()
+	var closeSenderErr, closeTransportErr, closeSocketErr, killErr error
+
+	if driver.sender != nil {
+		closeSenderErr = driver.sender.Close()
+	}
+	if driver.transport != nil {
+		closeTransportErr = driver.transport.Close()
+	}
+	if driver.socket != nil {
+		closeSocketErr = driver.socket.Close()
+	}
+	if driver.subprocess != nil {
+		killErr = driver.subprocess.Process.Kill()
+	}
 
 	if closeSenderErr != nil {
 		return closeSenderErr
diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go
index 9c6b1dc0..f7eb897e 100644
--- a/storagedriver/ipc/ipc.go
+++ b/storagedriver/ipc/ipc.go
@@ -5,9 +5,29 @@ import (
 	"io"
 	"reflect"
 
+	"github.com/docker/docker-registry/storagedriver"
 	"github.com/docker/libchan"
 )
 
+// IPCStorageDriver is the interface which IPC storage drivers must implement. As external storage
+// drivers may be defined to use a different version of the storagedriver.StorageDriver interface,
+// we use an additional version check to determine compatibility.
+type IPCStorageDriver interface {
+	// Version returns the storagedriver.StorageDriver interface version which this storage driver
+	// implements, which is used to determine driver compatibility
+	Version() (storagedriver.Version, error)
+}
+
+// IncompatibleVersionError is returned when a storage driver is using an incompatible version of
+// the storagedriver.StorageDriver api
+type IncompatibleVersionError struct {
+	version storagedriver.Version
+}
+
+func (e IncompatibleVersionError) Error() string {
+	return fmt.Sprintf("Incompatible storage driver version: %s", e.version)
+}
+
 // Request defines a remote method call request
 // A return value struct is to be sent over the ResponseChannel
 type Request struct {
@@ -38,6 +58,12 @@ func (err *responseError) Error() string {
 
 // IPC method call response object definitions
 
+// VersionResponse is a response for a Version request
+type VersionResponse struct {
+	Version storagedriver.Version
+	Error   *responseError
+}
+
 // ReadStreamResponse is a response for a ReadStream request
 type ReadStreamResponse struct {
 	Reader io.ReadCloser
diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go
index d73be2f6..d6cd83f0 100644
--- a/storagedriver/ipc/server.go
+++ b/storagedriver/ipc/server.go
@@ -61,6 +61,11 @@ func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) {
 // Responds to requests using the Request.ResponseChannel
 func handleRequest(driver storagedriver.StorageDriver, request Request) {
 	switch request.Type {
+	case "Version":
+		err := request.ResponseChannel.Send(&VersionResponse{Version: storagedriver.CurrentVersion})
+		if err != nil {
+			panic(err)
+		}
 	case "GetContent":
 		path, _ := request.Parameters["Path"].(string)
 		content, err := driver.GetContent(path)
diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go
index a66dba0c..b5da592f 100644
--- a/storagedriver/storagedriver.go
+++ b/storagedriver/storagedriver.go
@@ -3,8 +3,32 @@ package storagedriver
 import (
 	"fmt"
 	"io"
+	"strconv"
+	"strings"
 )
 
+// Version is a string representing the storage driver version, of the form Major.Minor.
+// The registry accepts storage drivers with the same major version and an equal or lower minor
+// version as its own current storage driver api version.
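+//
+// For example, a registry whose CurrentVersion is "0.3" accepts drivers reporting
+// versions "0.1" through "0.3", and rejects "0.4" or "1.0".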
+type Version string + +// Major returns the major (primary) component of a version +func (version Version) Major() uint { + majorPart := strings.Split(string(version), ".")[0] + major, _ := strconv.ParseUint(majorPart, 10, 0) + return uint(major) +} + +// Minor returns the minor (secondary) component of a version +func (version Version) Minor() uint { + minorPart := strings.Split(string(version), ".")[1] + minor, _ := strconv.ParseUint(minorPart, 10, 0) + return uint(minor) +} + +// CurrentVersion is the current storage driver Version +const CurrentVersion Version = "0.1" + // StorageDriver defines methods that a Storage Driver must implement for a filesystem-like // key/value object storage type StorageDriver interface { From e31b7d8d9a30d9138819f54cbe7744c908e49396 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Tue, 21 Oct 2014 13:33:28 -0700 Subject: [PATCH 019/165] Initial open-design proposal --- CONTRIBUTING.md | 55 +++++++++++++++++++++++++++++++++++ MAINTAINERS | 3 ++ open-design/MANIFESTO.md | 20 +++++++++++++ open-design/ROADMAP.md | 41 ++++++++++++++++++++++++++ open-design/specs/TEMPLATE.md | 52 +++++++++++++++++++++++++++++++++ 5 files changed, 171 insertions(+) create mode 100644 CONTRIBUTING.md create mode 100644 MAINTAINERS create mode 100644 open-design/MANIFESTO.md create mode 100644 open-design/ROADMAP.md create mode 100644 open-design/specs/TEMPLATE.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..3a706204 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing to the registry + +## Are you having issues? + +Please first try any of these support forums before opening an issue: + + * irc #docker on freenode (archives: [https://botbot.me/freenode/docker/]) + * https://forums.docker.com/ + * if your problem is with the "hub" (the website and other user-facing components), or about automated builds, then please direct your issues to https://support.docker.com + +## So, you found a bug? + +First check if your problem was already reported in the issue tracker. + +If it's already there, please refrain from adding "same here" comments - these don't add any value and are only adding useless noise. **Said comments will quite often be deleted at sight**. On the other hand, if you have technical, relevant informations to add, by all means do! + +Your issue is not there? Then please, create a ticket. + +If possible the following guidelines should be followed: + + * try to come up with a minimal, simple to reproduce test-case + * try to add a title that describe succintly the issue + * if you are running your own registry, please provide: + * registry version + * registry launch command used + * registry configuration + * registry logs + * in all cases: + * `docker version` and `docker info` + * run your docker daemon in debug mode (-D), and provide docker daemon logs + + +## You have a patch for a known bug, or a small correction? + +Basic github workflow (fork, patch, make sure the tests pass, PR). + +... and some simple rules to ensure quick merge: + + * clearly point to the issue(s) you want to fix + * when possible, prefer multiple (smaller) PRs adressing individual issues over a big one trying to adress multiple issues at once + * if you need to amend your PR following comments, squash instead of adding more commits + +## You want some shiny new feature to be added? + +Fork the project. 
+ +Create a new proposal in the folder open-design/specs, named DEP_MY_AWESOME_PROPOSAL.md, using open-design/specs/TEMPLATE.md as a starting point. + +Then immediately submit this new file as a pull-request, in order to get early feedback. + +Eventually, you will have to update your proposal to accommodate with the feedback you received. + +Usually, it's advised not to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can significantly altered (or rejected). + +Your implementation should then be submitted as a separate PR, that will be reviewed as well. diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..2292fb99 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes (@shykes) +Olivier Gambier (@dmp42) +Sam Alba (@samalba) diff --git a/open-design/MANIFESTO.md b/open-design/MANIFESTO.md new file mode 100644 index 00000000..f3cd03da --- /dev/null +++ b/open-design/MANIFESTO.md @@ -0,0 +1,20 @@ +# The "Distribution" project + +## What is this + +This is a part of the Docker project, or "primitive" that handles the "distribution" of images. + +### Punchline + +Pack. Sign. Ship. Store. Deliver. Verify. + +### Technical scope + +Distribution has tight relations with: + + * libtrust, providing cryptographical primitives to handle image signing and verification + * image format, as transferred over the wire + * docker-registry, the server side component that allows storage and retrieval of packed images + * authentication and key management APIs, that are used to verify images and access storage services + * PKI infrastructure + * docker "pull/push client" code gluing all this together - network communication code, tarsum, etc diff --git a/open-design/ROADMAP.md b/open-design/ROADMAP.md new file mode 100644 index 00000000..3f0c042b --- /dev/null +++ b/open-design/ROADMAP.md @@ -0,0 +1,41 @@ +# Roadmap + +## 24/11/2014: alpha + +Design and code: + +- implements a basic configuration loading mechanism: https://github.com/docker/docker-registry/issues/646 +- storage API is frozen, implemented and used: https://github.com/docker/docker-registry/issues/616 +- REST API defined and partly implemented: https://github.com/docker/docker-registry/issues/634 +- basic logging: https://github.com/docker/docker-registry/issues/635 +- auth design is frozen: https://github.com/docker/docker-registry/issues/623 + +Environment: + +- some good practice are in place and documented: https://github.com/docker/docker-registry/issues/657 + +## 12/22/2014: beta + +Design and code: + +- feature freeze +- mirroring defined: https://github.com/docker/docker-registry/issues/658 +- extension model defined: https://github.com/docker/docker-registry/issues/613 + +Environment: + +- doc-driven approach: https://github.com/docker/docker-registry/issues/627 + +## 01/12/2015: RC + +Design and code: + +- third party drivers and extensions +- basic search extension +- third-party layers garbage collection scripts +- healthcheck endpoints: https://github.com/docker/docker-registry/issues/656 +- bugnsnag/new-relic support: https://github.com/docker/docker-registry/issues/680 + +Environment: + +- exhaustive test-cases diff --git a/open-design/specs/TEMPLATE.md b/open-design/specs/TEMPLATE.md new file mode 100644 index 00000000..a87ea61e --- /dev/null +++ b/open-design/specs/TEMPLATE.md @@ -0,0 +1,52 @@ +# DEP #X: Awesome proposal + +## Scope + +This is related to "Foo" (eg: authentication/storage/extension/...). 
+ +## Abstract + +This proposal suggests to add support for "bar". + +## User stories + +"I'm a Hub user, and 'bar' allows me to do baz1" + +"I'm a FOSS user running my private registry and 'bar' allows me to do baz2" + +"I'm a company running the registry and 'bar' allows me to do baz3" + +## Technology pre-requisites + +'bar' can be implemented using: + + * foobar approach + * barfoo concurrent approach + +## Dependencies + +Project depends on baz to be completed (eg: docker engine support, or another registry proposal). + +## Technical proposal + +We are going to do foofoo alongside with some chunks of barbaz. + +## Roadmap + + * YYYY-MM-DD: proposal submitted + * YYYY-MM-DD: proposal reviewed and updated + * YYYY-MM-DD: implementation started (WIP PR) + * YYYY-MM-DD: implementation complete ready for thorough review + * YYYY-MM-DD: final PR version + * YYYY-MM-DD: implementation merged + +## Editors + +Editors: + + * my Company, or maybe just me + +Implementors: + + * me and my buddies + * another team working on a different approach \ No newline at end of file From 150677f1f592fbb6ce23db54d66cf57f1531bf37 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 6 Nov 2014 10:35:15 -0800 Subject: [PATCH 020/165] Expects storage driver executables to be of the form registry-storage-name Moves main packages to sub-packages of the individual storage drivers --- .../registry-storagedriver-filesystem/main.go | 0 .../registry-storagedriver-inmemory/main.go | 0 .../registry-storagedriver-s3/main.go | 0 storagedriver/README.md | 4 ++-- storagedriver/ipc/client.go | 18 ++++++++---------- 5 files changed, 10 insertions(+), 12 deletions(-) rename main/storagedriver/filesystem/filesystem.go => cmd/registry-storagedriver-filesystem/main.go (100%) rename main/storagedriver/inmemory/inmemory.go => cmd/registry-storagedriver-inmemory/main.go (100%) rename main/storagedriver/s3/s3.go => cmd/registry-storagedriver-s3/main.go (100%) diff --git a/main/storagedriver/filesystem/filesystem.go b/cmd/registry-storagedriver-filesystem/main.go similarity index 100% rename from main/storagedriver/filesystem/filesystem.go rename to cmd/registry-storagedriver-filesystem/main.go diff --git a/main/storagedriver/inmemory/inmemory.go b/cmd/registry-storagedriver-inmemory/main.go similarity index 100% rename from main/storagedriver/inmemory/inmemory.go rename to cmd/registry-storagedriver-inmemory/main.go diff --git a/main/storagedriver/s3/s3.go b/cmd/registry-storagedriver-s3/main.go similarity index 100% rename from main/storagedriver/s3/s3.go rename to cmd/registry-storagedriver-s3/main.go diff --git a/storagedriver/README.md b/storagedriver/README.md index f2795834..09ee7bf0 100644 --- a/storagedriver/README.md +++ b/storagedriver/README.md @@ -26,7 +26,7 @@ Driver Selection and Configuration The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package. -Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. 
If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the same name and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
+Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no driver is registered with the given name, this factory will attempt to find an executable storage driver with the executable name "registry-storagedriver-\<driver name\>" and return an IPC storage driver wrapper managing the driver subprocess. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`.
 
 Driver Contribution
 ===================
@@ -38,7 +38,7 @@ To create a valid storage driver, one must implement the `storagedriver.StorageD
 Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase.
 
 ### Out-of-process drivers
-As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `main/storagedriver/filesystem/filesystem.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
+As many users will run the registry as a pre-constructed docker container, storage drivers should also be distributable as IPC server executables. Drivers written in go should model the main method provided in `cmd/registry-storagedriver-filesystem/main.go`. Parameters to IPC drivers will be provided as a JSON-serialized map in the first argument to the process. These parameters should be validated and then a blocking call to `ipc.StorageDriverServer` should be made with a new storage driver.
 
 ## Testing
 Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in go. Two methods are provided for registering test suites, `RegisterInProcessSuite` and `RegisterIPCSuite`, which run the same set of tests for the driver imported or managed over IPC respectively.
diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go
index fd5f15c3..929eda61 100644
--- a/storagedriver/ipc/client.go
+++ b/storagedriver/ipc/client.go
@@ -8,13 +8,16 @@ import (
 	"net"
 	"os"
 	"os/exec"
-	"path"
 	"syscall"
 
 	"github.com/docker/libchan"
 	"github.com/docker/libchan/spdy"
 )
 
+// StorageDriverExecutablePrefix is the prefix which the IPC storage driver loader expects driver
+// executables to begin with. For example, the s3 driver should be named "registry-storagedriver-s3".
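+//
+// Driver executables are resolved via the standard executable search path; for example, a
+// driver named "s3" is located with:
+//
+//	driverPath, err := exec.LookPath(StorageDriverExecutablePrefix + "s3")
+//
+// as is done in NewDriverClient below.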
+const StorageDriverExecutablePrefix = "registry-storagedriver-" + // StorageDriverClient is a storagedriver.StorageDriver implementation using a managed child process // communicating over IPC using libchan with a unix domain socket type StorageDriverClient struct { @@ -38,15 +41,10 @@ func NewDriverClient(name string, parameters map[string]string) (*StorageDriverC return nil, err } - driverPath := os.ExpandEnv(path.Join("$GOPATH", "bin", name)) - if _, err := os.Stat(driverPath); os.IsNotExist(err) { - driverPath = path.Join(path.Dir(os.Args[0]), name) - } - if _, err := os.Stat(driverPath); os.IsNotExist(err) { - driverPath, err = exec.LookPath(name) - if err != nil { - return nil, err - } + driverExecName := StorageDriverExecutablePrefix + name + driverPath, err := exec.LookPath(driverExecName) + if err != nil { + return nil, err } command := exec.Command(driverPath, string(paramsBytes)) From cb1bdacbe3aacb6a56e53b9026ff7e82f0483b5a Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Fri, 7 Nov 2014 12:58:48 -0800 Subject: [PATCH 021/165] Renames ResumeWritePosition to CurrentSize in storage driver api --- storagedriver/filesystem/filesystem.go | 4 ++-- storagedriver/inmemory/inmemory.go | 4 ++-- storagedriver/ipc/client.go | 6 +++--- storagedriver/ipc/ipc.go | 4 ++-- storagedriver/ipc/server.go | 6 +++--- storagedriver/s3/s3.go | 12 ++++++------ storagedriver/storagedriver.go | 8 ++++---- storagedriver/testsuites/testsuites.go | 4 ++-- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/filesystem.go index 0bdf6017..2cca7890 100644 --- a/storagedriver/filesystem/filesystem.go +++ b/storagedriver/filesystem/filesystem.go @@ -98,7 +98,7 @@ func (d *FilesystemDriver) ReadStream(path string, offset uint64) (io.ReadCloser func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, reader io.ReadCloser) error { defer reader.Close() - resumableOffset, err := d.ResumeWritePosition(subPath) + resumableOffset, err := d.CurrentSize(subPath) if _, pathNotFound := err.(storagedriver.PathNotFoundError); err != nil && !pathNotFound { return err } @@ -154,7 +154,7 @@ func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, read return err } -func (d *FilesystemDriver) ResumeWritePosition(subPath string) (uint64, error) { +func (d *FilesystemDriver) CurrentSize(subPath string) (uint64, error) { fullPath := d.subPath(subPath) fileInfo, err := os.Stat(fullPath) diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/inmemory.go index 9b9fd947..fee39bc9 100644 --- a/storagedriver/inmemory/inmemory.go +++ b/storagedriver/inmemory/inmemory.go @@ -78,7 +78,7 @@ func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io d.mutex.RLock() defer d.mutex.RUnlock() - resumableOffset, err := d.ResumeWritePosition(path) + resumableOffset, err := d.CurrentSize(path) if err != nil { return err } @@ -100,7 +100,7 @@ func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io return nil } -func (d *InMemoryDriver) ResumeWritePosition(path string) (uint64, error) { +func (d *InMemoryDriver) CurrentSize(path string) (uint64, error) { d.mutex.RLock() defer d.mutex.RUnlock() contents, ok := d.storage[path] diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index 929eda61..8c74e084 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -216,16 +216,16 @@ func (driver *StorageDriverClient) WriteStream(path string, 
offset, size uint64, return nil } -func (driver *StorageDriverClient) ResumeWritePosition(path string) (uint64, error) { +func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path} - err := driver.sender.Send(&Request{Type: "ResumeWritePosition", Parameters: params, ResponseChannel: remoteSender}) + err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return 0, err } - var response ResumeWritePositionResponse + var response CurrentSizeResponse err = receiver.Receive(&response) if err != nil { return 0, err diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go index 9c6b1dc0..233e3891 100644 --- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -49,8 +49,8 @@ type WriteStreamResponse struct { Error *responseError } -// ResumeWritePositionResponse is a response for a ResumeWritePosition request -type ResumeWritePositionResponse struct { +// CurrentSizeResponse is a response for a CurrentSize request +type CurrentSizeResponse struct { Position uint64 Error *responseError } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index d73be2f6..ccd0e3df 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -119,10 +119,10 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { if err != nil { panic(err) } - case "ResumeWritePosition": + case "CurrentSize": path, _ := request.Parameters["Path"].(string) - position, err := driver.ResumeWritePosition(path) - response := ResumeWritePositionResponse{ + position, err := driver.CurrentSize(path) + response := CurrentSizeResponse{ Position: position, Error: ResponseError(err), } diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go index ea13b87c..c932a1e1 100644 --- a/storagedriver/s3/s3.go +++ b/storagedriver/s3/s3.go @@ -177,7 +177,7 @@ func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadC return nil } -func (d *S3Driver) ResumeWritePosition(path string) (uint64, error) { +func (d *S3Driver) CurrentSize(path string) (uint64, error) { _, parts, err := d.getAllParts(path) if err != nil { return 0, err @@ -190,11 +190,11 @@ func (d *S3Driver) ResumeWritePosition(path string) (uint64, error) { return (((uint64(len(parts)) - 1) * uint64(parts[0].Size)) + uint64(parts[len(parts)-1].Size)), nil } -func (d *S3Driver) List(prefix string) ([]string, error) { - if prefix[len(prefix)-1] != '/' { - prefix = prefix + "/" +func (d *S3Driver) List(path string) ([]string, error) { + if path[len(path)-1] != '/' { + path = path + "/" } - listResponse, err := d.Bucket.List(prefix, "/", "", listPartsMax) + listResponse, err := d.Bucket.List(path, "/", "", listPartsMax) if err != nil { return nil, err } @@ -212,7 +212,7 @@ func (d *S3Driver) List(prefix string) ([]string, error) { } if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(prefix, "/", listResponse.NextMarker, listPartsMax) + listResponse, err = d.Bucket.List(path, "/", listResponse.NextMarker, listPartsMax) if err != nil { return nil, err } diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index a66dba0c..01ebd5ff 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -25,12 +25,12 @@ type StorageDriver interface { // the given path // The driver will know it has received the full contents when it has read "size" bytes // May be used to resume writing 
a stream by providing a nonzero offset
-	// The offset must be no larger than the ResumeWritePosition for this path
+	// The offset must be no larger than the CurrentSize for this path
 	WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error
 
-	// ResumeWritePosition retrieves the byte offset at which it is safe to continue writing at the
-	// given path
-	ResumeWritePosition(path string) (uint64, error)
+	// CurrentSize retrieves the current size in bytes of the object at the given path
+	// It should be safe to read or write anywhere up to this point
+	CurrentSize(path string) (uint64, error)
 
 	// List returns a list of the objects that are direct descendants of the given path
 	List(path string) ([]string, error)
diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go
index 94d85461..d2859913 100644
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -160,7 +160,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) {
 	err := suite.StorageDriver.WriteStream(filename, 0, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(contentsChunk1)))
 	c.Assert(err, IsNil)
 
-	offset, err := suite.StorageDriver.ResumeWritePosition(filename)
+	offset, err := suite.StorageDriver.CurrentSize(filename)
 	c.Assert(err, IsNil)
 	if offset > chunkSize {
 		c.Fatalf("Offset too large, %d > %d", offset, chunkSize)
@@ -168,7 +168,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) {
 	err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize])))
 	c.Assert(err, IsNil)
 
-	offset, err = suite.StorageDriver.ResumeWritePosition(filename)
+	offset, err = suite.StorageDriver.CurrentSize(filename)
 	c.Assert(err, IsNil)
 	if offset > 2*chunkSize {
 		c.Fatalf("Offset too large, %d > %d", offset, 2*chunkSize)

From fec2afc93fee50e4980d52912707d0e692d65291 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 7 Nov 2014 16:08:14 -0800
Subject: [PATCH 022/165] Initial V2 API Router Implementation

This commit includes the initial API router, based on gorilla mux, and a test
suite ensuring the expected variables are extracted. Currently unexported, the
structure here will likely change as this definition will be shared with the
API client.
---
 routes.go      |  72 +++++++++++++++++++++++++++++
 routes_test.go | 122 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 194 insertions(+)
 create mode 100644 routes.go
 create mode 100644 routes_test.go

diff --git a/routes.go b/routes.go
new file mode 100644
index 00000000..10c9e398
--- /dev/null
+++ b/routes.go
@@ -0,0 +1,72 @@
+package registry
+
+import (
+	"github.com/gorilla/mux"
+)
+
+const (
+	routeNameRoot             = "root"
+	routeNameName             = "name"
+	routeNameImageManifest    = "image-manifest"
+	routeNameTags             = "tags"
+	routeNameLayer            = "layer"
+	routeNameStartLayerUpload = "start-layer-upload"
+	routeNameLayerUpload      = "layer-upload"
+)
+
+var allEndpoints = []string{
+	routeNameImageManifest,
+	routeNameTags,
+	routeNameLayer,
+	routeNameStartLayerUpload,
+	routeNameLayerUpload,
+}
+
+// v2APIRouter builds a gorilla router with named routes for the various API
+// methods. We may export this for use by the client.
+func v2APIRouter() *mux.Router {
+	router := mux.NewRouter()
+
+	rootRouter := router.
+		PathPrefix("/v2").
+		Name(routeNameRoot).
+		Subrouter()
+
+	// All routes are subordinate to named routes
+	namedRouter := rootRouter.
+		PathPrefix("/{name:[A-Za-z0-9-_]+/[A-Za-z0-9-_]+}"). // TODO(stevvooe): Verify this format with core
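+		// Note that this pattern requires repository names to have exactly two path
+		// components, e.g. "library/ubuntu"; a single-component name will not match.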
+		Name(routeNameName).
+		Subrouter().
+		StrictSlash(true)
+
+	// GET     /v2/<name>/image/<tag>   Image Manifest   Fetch the image manifest identified by name and tag.
+	// PUT     /v2/<name>/image/<tag>   Image Manifest   Upload the image manifest identified by name and tag.
+	// DELETE  /v2/<name>/image/<tag>   Image Manifest   Delete the image identified by name and tag.
+	namedRouter.
+		Path("/image/{tag:[A-Za-z0-9-_]+}").
+		Name(routeNameImageManifest)
+
+	// GET  /v2/<name>/tags  Tags  Fetch the tags under the repository identified by name.
+	namedRouter.
+		Path("/tags").
+		Name(routeNameTags)
+
+	// GET  /v2/<name>/layer/<tarsum>  Layer  Fetch the layer identified by tarsum.
+	namedRouter.
+		Path("/layer/{tarsum}").
+		Name(routeNameLayer)
+
+	// POST  /v2/<name>/layer/<tarsum>/upload/  Layer Upload  Initiate an upload of the layer identified by tarsum. Requires length and a checksum parameter.
+	namedRouter.
+		Path("/layer/{tarsum}/upload/").
+		Name(routeNameStartLayerUpload)
+
+	// GET     /v2/<name>/layer/<tarsum>/upload/<uuid>  Layer Upload  Get the status of the upload identified by tarsum and uuid.
+	// PUT     /v2/<name>/layer/<tarsum>/upload/<uuid>  Layer Upload  Upload all or a chunk of the upload identified by tarsum and uuid.
+	// DELETE  /v2/<name>/layer/<tarsum>/upload/<uuid>  Layer Upload  Cancel the upload identified by layer and uuid
+	namedRouter.
+		Path("/layer/{tarsum}/upload/{uuid}").
+		Name(routeNameLayerUpload)
+
+	return router
+}
diff --git a/routes_test.go b/routes_test.go
new file mode 100644
index 00000000..6b1daf80
--- /dev/null
+++ b/routes_test.go
@@ -0,0 +1,122 @@
+package registry
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"testing"
+
+	"github.com/gorilla/mux"
+)
+
+type routeInfo struct {
+	RequestURI string
+	Vars       map[string]string
+}
+
+// TestRouter registers a test handler with all the routes and ensures that
+// each route returns the expected path variables. No method verification is
+// present. This is not meant to be exhaustive, but serves as a check to ensure
+// that the expected variables are extracted.
+//
+// This may go away as the application structure comes together.
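+//
+// Each test case below registers testHandler on a named route, issues a GET request
+// against a URL built to match that route, and asserts that the echoed RequestURI and
+// mux vars are as expected.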
+func TestRouter(t *testing.T) { + + router := v2APIRouter() + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + routeInfo := routeInfo{ + RequestURI: r.RequestURI, + Vars: mux.Vars(r), + } + + enc := json.NewEncoder(w) + + if err := enc.Encode(routeInfo); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + + // Startup test server + server := httptest.NewServer(router) + + for _, testcase := range []struct { + routeName string + expectedRouteInfo routeInfo + }{ + { + routeName: routeNameImageManifest, + expectedRouteInfo: routeInfo{ + RequestURI: "/v2/foo/bar/image/tag", + Vars: map[string]string{ + "name": "foo/bar", + "tag": "tag", + }, + }, + }, + { + routeName: routeNameTags, + expectedRouteInfo: routeInfo{ + RequestURI: "/v2/foo/bar/tags", + Vars: map[string]string{ + "name": "foo/bar", + }, + }, + }, + { + routeName: routeNameLayer, + expectedRouteInfo: routeInfo{ + RequestURI: "/v2/foo/bar/layer/tarsum", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum", + }, + }, + }, + { + routeName: routeNameStartLayerUpload, + expectedRouteInfo: routeInfo{ + RequestURI: "/v2/foo/bar/layer/tarsum/upload/", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum", + }, + }, + }, + { + routeName: routeNameLayerUpload, + expectedRouteInfo: routeInfo{ + RequestURI: "/v2/foo/bar/layer/tarsum/upload/uuid", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum", + "uuid": "uuid", + }, + }, + }, + } { + // Register the endpoint + router.GetRoute(testcase.routeName).Handler(testHandler) + u := server.URL + testcase.expectedRouteInfo.RequestURI + + resp, err := http.Get(u) + + if err != nil { + t.Fatalf("error issuing get request: %v", err) + } + + dec := json.NewDecoder(resp.Body) + + var actualRouteInfo routeInfo + if err := dec.Decode(&actualRouteInfo); err != nil { + t.Fatalf("error reading json response: %v", err) + } + + if !reflect.DeepEqual(actualRouteInfo, testcase.expectedRouteInfo) { + t.Fatalf("actual does not equal expected: %v != %v", actualRouteInfo, testcase.expectedRouteInfo) + } + } + +} From 2b7b8fa2ca5ac9ffe4ad420620b4e2ce0e03f83b Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Tue, 21 Oct 2014 13:33:28 -0700 Subject: [PATCH 023/165] Initial open-design proposal --- CONTRIBUTING.md | 15 +++++++-------- open-design/ROADMAP.md | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3a706204..fb7fcf78 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,14 +12,14 @@ Please first try any of these support forums before opening an issue: First check if your problem was already reported in the issue tracker. -If it's already there, please refrain from adding "same here" comments - these don't add any value and are only adding useless noise. **Said comments will quite often be deleted at sight**. On the other hand, if you have technical, relevant informations to add, by all means do! +If it's already there, please refrain from adding "same here" comments - these don't add any value and are only adding useless noise. **Said comments will quite often be deleted at sight**. On the other hand, if you have any technical, relevant information to add, by all means do! Your issue is not there? Then please, create a ticket. 
If possible the following guidelines should be followed:
 
  * try to come up with a minimal, simple to reproduce test-case
- * try to add a title that describe succintly the issue
+ * try to add a title that describes the issue succinctly
  * if you are running your own registry, please provide:
    * registry version
    * registry launch command used
@@ -27,8 +27,7 @@ If possible the following guidelines should be followed:
    * registry configuration
    * registry logs
  * in all cases:
    * `docker version` and `docker info`
-   * run your docker daemon in debug mode (-D), and provide docker daemon logs
-
+   * run your docker daemon in debug mode (-D), and provide docker daemon logs
 
 ## You have a patch for a known bug, or a small correction?
 
@@ -37,19 +36,19 @@ Basic github workflow (fork, patch, make sure the tests pass, PR).
 ... and some simple rules to ensure quick merge:
 
  * clearly point to the issue(s) you want to fix
- * when possible, prefer multiple (smaller) PRs adressing individual issues over a big one trying to adress multiple issues at once
+ * when possible, prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
  * if you need to amend your PR following comments, squash instead of adding more commits
 
 ## You want some shiny new feature to be added?
 
 Fork the project.
 
-Create a new proposal in the folder open-design/specs, named DEP_MY_AWESOME_PROPOSAL.md, using open-design/specs/TEMPLATE.md as a starting point.
+Create a new proposal in the folder `open-design/specs`, named `DEP_MY_AWESOME_PROPOSAL.md`, using `open-design/specs/TEMPLATE.md` as a starting point.
 
 Then immediately submit this new file as a pull-request, in order to get early feedback.
 
-Eventually, you will have to update your proposal to accommodate with the feedback you received.
+Eventually, you will have to update your proposal to accommodate the feedback you received.
 
-Usually, it's advised not to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can significantly altered (or rejected).
+Usually, it's not advisable to start working too much on the implementation itself before the proposal receives sufficient feedback, since it can be significantly altered (or rejected).
 
 Your implementation should then be submitted as a separate PR, that will be reviewed as well.
diff --git a/open-design/ROADMAP.md b/open-design/ROADMAP.md
index 3f0c042b..54e244ad 100644
--- a/open-design/ROADMAP.md
+++ b/open-design/ROADMAP.md
@@ -1,6 +1,6 @@
 # Roadmap
 
-## 24/11/2014: alpha
+## 11/24/2014: alpha
 
 Design and code:
 

From af0411420a3155ed6ae2b344ccdc01a134e4e0a7 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 10 Nov 2014 15:09:10 -0800
Subject: [PATCH 024/165] Initial implementation of API errors data structure

---
 errors.go      | 157 +++++++++++++++++++++++++++++++++++++++++++++++++
 errors_test.go |  77 ++++++++++++++++++++++++
 2 files changed, 234 insertions(+)
 create mode 100644 errors.go
 create mode 100644 errors_test.go

diff --git a/errors.go b/errors.go
new file mode 100644
index 00000000..75151594
--- /dev/null
+++ b/errors.go
@@ -0,0 +1,157 @@
+package registry
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+const (
+	ErrorCodeUnknown ErrorCode = iota
+
+	// The following errors can happen during a layer upload.
+ ErrorCodeInvalidChecksum + ErrorCodeInvalidLength + ErrorCodeInvalidTarsum + + // The following errors can happen during manifest upload. + ErrorCodeInvalidName + ErrorCodeInvalidTag + ErrorCodeUnverifiedManifest + ErrorCodeUnknownLayer + ErrorCodeUntrustedSignature +) + +var errorCodeStrings = map[ErrorCode]string{ + ErrorCodeUnknown: "UNKNOWN", + ErrorCodeInvalidChecksum: "INVALID_CHECKSUM", + ErrorCodeInvalidLength: "INVALID_LENGTH", + ErrorCodeInvalidTarsum: "INVALID_TARSUM", + ErrorCodeInvalidName: "INVALID_NAME", + ErrorCodeInvalidTag: "INVALID_TAG", + ErrorCodeUnverifiedManifest: "UNVERIFIED_MANIFEST", + ErrorCodeUnknownLayer: "UNKNOWN_LAYER", + ErrorCodeUntrustedSignature: "UNTRUSTED_SIGNATURE", +} + +var errorCodesMessages = map[ErrorCode]string{ + ErrorCodeUnknown: "unknown error", + ErrorCodeInvalidChecksum: "provided checksum did not match uploaded content", + ErrorCodeInvalidLength: "provided length did not match content length", + ErrorCodeInvalidTarsum: "provided tarsum did not match binary content", + ErrorCodeInvalidName: "Manifest name did not match URI", + ErrorCodeInvalidTag: "Manifest tag did not match URI", + ErrorCodeUnverifiedManifest: "Manifest failed signature validation", + ErrorCodeUnknownLayer: "Referenced layer not available", + ErrorCodeUntrustedSignature: "Manifest signed by untrusted source", +} + +var stringToErrorCode map[string]ErrorCode + +func init() { + stringToErrorCode = make(map[string]ErrorCode, len(errorCodeStrings)) + + // Build up reverse error code map + for k, v := range errorCodeStrings { + stringToErrorCode[v] = k + } +} + +// ParseErrorCode attempts to parse the error code string, returning +// ErrorCodeUnknown if the error is not known. +func ParseErrorCode(s string) ErrorCode { + ec, ok := stringToErrorCode[s] + + if !ok { + return ErrorCodeUnknown + } + + return ec +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + s, ok := errorCodeStrings[ec] + + if !ok { + return errorCodeStrings[ErrorCodeUnknown] + } + + return s +} + +func (ec ErrorCode) Message() string { + m, ok := errorCodesMessages[ec] + + if !ok { + return errorCodesMessages[ErrorCodeUnknown] + } + + return m +} + +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +func (ec *ErrorCode) UnmarshalText(text []byte) error { + *ec = stringToErrorCode[string(text)] + + return nil +} + +type Error struct { + Code ErrorCode `json:"code,omitempty"` + Message string `json:"message,omitempty"` + Detail interface{} `json:"detail,omitempty"` +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", + strings.Title(strings.Replace(e.Code.String(), "_", " ", -1)), + e.Message) +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors struct { + Errors []Error `json:"errors,omitempty"` +} + +// Push pushes an error on to the error stack, with the optional detail +// argument. It is a programming error (ie panic) to push more than one +// detail at a time. 
+func (errs *Errors) Push(code ErrorCode, details ...interface{}) {
+	if len(details) > 1 {
+		panic("please specify zero or one detail items for this error")
+	}
+
+	var detail interface{}
+	if len(details) > 0 {
+		detail = details[0]
+	}
+
+	errs.Errors = append(errs.Errors, Error{
+		Code:    code,
+		Message: code.Message(),
+		Detail:  detail,
+	})
+}
+
+// detailUnknownLayer provides detail for unknown layer errors, returned by
+// image manifest push for layers that are not yet transferred. This is intended
+// to only be used on the backend to return detail for this specific error.
+type DetailUnknownLayer struct {
+
+	// Unknown should contain the contents of a layer descriptor, which is a
+	// single json object with the key "blobSum" currently.
+	Unknown struct {
+
+		// BlobSum contains the uniquely identifying tarsum of the layer.
+		BlobSum string `json:"blobSum"`
+	} `json:"unknown"`
+}
diff --git a/errors_test.go b/errors_test.go
new file mode 100644
index 00000000..dc6a8de7
--- /dev/null
+++ b/errors_test.go
@@ -0,0 +1,77 @@
+package registry
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// TestErrorCodes ensures that error code format, mappings and
+// marshaling/unmarshaling round trips are stable.
+func TestErrorCodes(t *testing.T) {
+	for ec := range errorCodeStrings {
+		if ec.String() != errorCodeStrings[ec] {
+			t.Fatalf("error code string incorrect: %q != %q", ec.String(), errorCodeStrings[ec])
+		}
+
+		if ec.Message() != errorCodesMessages[ec] {
+			t.Fatalf("incorrect message for error code %v: %q != %q", ec, ec.Message(), errorCodesMessages[ec])
+		}
+
+		// Serialize the error code using the json library to ensure that we
+		// get a string and it works round trip.
+		p, err := json.Marshal(ec)
+
+		if err != nil {
+			t.Fatalf("error marshaling error code %v: %v", ec, err)
+		}
+
+		if len(p) <= 0 {
+			t.Fatalf("expected content in marshaled form for error code %v", ec)
+		}
+
+		// First, unmarshal to interface and ensure we have a string.
+		var ecUnspecified interface{}
+		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
+		}
+
+		if _, ok := ecUnspecified.(string); !ok {
+			t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified)
+		}
+
+		// Now, unmarshal with the error code type and ensure they are equal
+		var ecUnmarshaled ErrorCode
+		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", ec, err)
+		}
+
+		if ecUnmarshaled != ec {
+			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec)
+		}
+	}
+}
+
+// TestErrorsManagement does a quick check of the Errors type to ensure that
+// members are properly pushed and marshaled.
+func TestErrorsManagement(t *testing.T) {
+	var errs Errors
+
+	errs.Push(ErrorCodeInvalidChecksum)
+
+	var detail DetailUnknownLayer
+	detail.Unknown.BlobSum = "sometestblobsumdoesntmatter"
+
+	errs.Push(ErrorCodeUnknownLayer, detail)
+
+	p, err := json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marshaling errors: %v", err)
+	}
+
+	expectedJSON := "{\"errors\":[{\"code\":\"INVALID_CHECKSUM\",\"message\":\"provided checksum did not match uploaded content\"},{\"code\":\"UNKNOWN_LAYER\",\"message\":\"Referenced layer not available\",\"detail\":{\"unknown\":{\"blobSum\":\"sometestblobsumdoesntmatter\"}}}]}"
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+}

From da7eef2e0427d4f077a46b6ae39749fa8f7abf66 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 10 Nov 2014 15:56:23 -0800
Subject: [PATCH 025/165] Allow Errors to be an error itself

This has Errors implement the error interface, allowing it to pose as
an error itself. Use of this in the server may be minimal, for now, but
it's useful for passing around opaque client errors.

A method, PushErr, has also been added to allow arbitrary errors to be
passed into the Errors list. This keeps the errors list flexible,
allowing the app to collect errors before we have codes properly
mapped.
---
 errors.go | 24 ++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/errors.go b/errors.go
index 75151594..53dcb6bf 100644
--- a/errors.go
+++ b/errors.go
@@ -119,7 +119,7 @@ func (e Error) Error() string {
 // Errors provides the envelope for multiple errors and a few sugar methods
 // for use within the application.
 type Errors struct {
-	Errors []Error `json:"errors,omitempty"`
+	Errors []error `json:"errors,omitempty"`
 }
 
 // Push pushes an error on to the error stack, with the optional detail
@@ -135,13 +135,33 @@ func (errs *Errors) Push(code ErrorCode, details ...interface{}) {
 		detail = details[0]
 	}
 
-	errs.Errors = append(errs.Errors, Error{
+	errs.PushErr(Error{
 		Code:    code,
 		Message: code.Message(),
 		Detail:  detail,
 	})
 }
 
+// PushErr pushes an error interface onto the error stack.
+func (errs *Errors) PushErr(err error) {
+	errs.Errors = append(errs.Errors, err)
+}
+
+func (errs *Errors) Error() string {
+	switch len(errs.Errors) {
+	case 0:
+		return ""
+	case 1:
+		return errs.Errors[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs.Errors {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
 // detailUnknownLayer provides detail for unknown layer errors, returned by
 // image manifest push for layers that are not yet transferred. This is intended
 // to only be used on the backend to return detail for this specific error.
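
To make the envelope concrete, here is a minimal, hypothetical sketch of how a
handler built on this type might accumulate errors and serialize them for a
client. The handler name and the blob sum value are invented for illustration,
and the enclosing file is assumed to import encoding/json, fmt, and net/http:

    func exampleErrorResponse(w http.ResponseWriter, r *http.Request) {
    	var errs Errors

    	// A code-mapped error with structured detail attached.
    	var detail DetailUnknownLayer
    	detail.Unknown.BlobSum = "sometestblobsumdoesntmatter"
    	errs.Push(ErrorCodeUnknownLayer, detail)

    	// An arbitrary error collected via PushErr. Note that a plain error
    	// has no exported fields, so it serializes as "{}"; PushErr is most
    	// useful for internal collection and for the aggregate Error() string.
    	errs.PushErr(fmt.Errorf("backend temporarily unavailable"))

    	w.WriteHeader(http.StatusBadRequest)
    	if err := json.NewEncoder(w).Encode(errs); err != nil {
    		panic(err) // sketch only; a real handler would log this instead
    	}
    }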
From 0618a2ebd73fe2965cc5ff3f67e69162bedd7b74 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 10 Nov 2014 18:26:06 -0800 Subject: [PATCH 026/165] Clearer names for layer upload routes --- routes.go | 20 ++++++++++---------- routes_test.go | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/routes.go b/routes.go index 10c9e398..d4942696 100644 --- a/routes.go +++ b/routes.go @@ -5,21 +5,21 @@ import ( ) const ( - routeNameRoot = "root" - routeNameName = "name" - routeNameImageManifest = "image-manifest" - routeNameTags = "tags" - routeNameLayer = "layer" - routeNameStartLayerUpload = "start-layer-upload" - routeNameLayerUpload = "layer-upload" + routeNameRoot = "root" + routeNameName = "name" + routeNameImageManifest = "image-manifest" + routeNameTags = "tags" + routeNameLayer = "layer" + routeNameLayerUpload = "layer-upload" + routeNameLayerUploadResume = "layer-upload-resume" ) var allEndpoints = []string{ routeNameImageManifest, routeNameTags, routeNameLayer, - routeNameStartLayerUpload, routeNameLayerUpload, + routeNameLayerUploadResume, } // v2APIRouter builds a gorilla router with named routes for the various API @@ -59,14 +59,14 @@ func v2APIRouter() *mux.Router { // POST /v2//layer//upload/ Layer Upload Initiate an upload of the layer identified by tarsum. Requires length and a checksum parameter. namedRouter. Path("/layer/{tarsum}/upload/"). - Name(routeNameStartLayerUpload) + Name(routeNameLayerUpload) // GET /v2//layer//upload/ Layer Upload Get the status of the upload identified by tarsum and uuid. // PUT /v2//layer//upload/ Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. // DELETE /v2//layer//upload/ Layer Upload Cancel the upload identified by layer and uuid namedRouter. Path("/layer/{tarsum}/upload/{uuid}"). - Name(routeNameLayerUpload) + Name(routeNameLayerUploadResume) return router } diff --git a/routes_test.go b/routes_test.go index 6b1daf80..e3ef371a 100644 --- a/routes_test.go +++ b/routes_test.go @@ -76,7 +76,7 @@ func TestRouter(t *testing.T) { }, }, { - routeName: routeNameStartLayerUpload, + routeName: routeNameLayerUpload, expectedRouteInfo: routeInfo{ RequestURI: "/v2/foo/bar/layer/tarsum/upload/", Vars: map[string]string{ @@ -86,7 +86,7 @@ func TestRouter(t *testing.T) { }, }, { - routeName: routeNameLayerUpload, + routeName: routeNameLayerUploadResume, expectedRouteInfo: routeInfo{ RequestURI: "/v2/foo/bar/layer/tarsum/upload/uuid", Vars: map[string]string{ From 22c9f45598491efe30718a3c5260b4fc170a4883 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 10 Nov 2014 18:57:38 -0800 Subject: [PATCH 027/165] Carve out initial application structure This changeset defines the application structure to be used for the http side of the new registry. The main components are the App and Context structs. The App context is instance global and manages global configuration and resources. Context contains request-specific resources that may be created as a by-product of an in-flight request. To latently construct per-request handlers and leverage gorilla/mux, a dispatch structure has been propped up next to the main handler flow. Without this, a router and all handlers need to be constructed on every request. By constructing handlers on each request, we ensure thread isolation and can carefully control the security context of in-flight requests. There are unit tests covering this functionality. 
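
As a rough usage sketch, the resulting App is an ordinary http.Handler, so it
can be mounted directly or wrapped in middleware. The listen address below is
illustrative and not taken from this change; the import path is the
repository root package:

    package main

    import (
    	"net/http"

    	registry "github.com/docker/docker-registry"
    	"github.com/docker/docker-registry/configuration"
    )

    func main() {
    	// NewApp builds the v2 router and registers the per-route
    	// dispatchers described above.
    	app := registry.NewApp(configuration.Configuration{})

    	// Sketch only: a real entrypoint would load configuration and
    	// check the returned error.
    	http.ListenAndServe(":5000", app)
    }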
--- app.go | 94 ++++++++++++++++++++++++++++++++++++ app_test.go | 127 +++++++++++++++++++++++++++++++++++++++++++++++++ context.go | 34 +++++++++++++ helpers.go | 20 ++++++++ images.go | 46 ++++++++++++++++++ layer.go | 34 +++++++++++++ layerupload.go | 63 ++++++++++++++++++++++++ tags.go | 28 +++++++++++ util.go | 27 +++++++++++ 9 files changed, 473 insertions(+) create mode 100644 app.go create mode 100644 app_test.go create mode 100644 context.go create mode 100644 helpers.go create mode 100644 images.go create mode 100644 layer.go create mode 100644 layerupload.go create mode 100644 tags.go create mode 100644 util.go diff --git a/app.go b/app.go new file mode 100644 index 00000000..63635250 --- /dev/null +++ b/app.go @@ -0,0 +1,94 @@ +package registry + +import ( + "net/http" + + "github.com/docker/docker-registry/configuration" + + log "github.com/Sirupsen/logrus" + "github.com/gorilla/mux" +) + +// App is a global registry application object. Shared resources can be placed +// on this object that will be accessible from all requests. Any writable +// fields should be protected. +type App struct { + Config configuration.Configuration + + router *mux.Router +} + +// NewApp takes a configuration and returns a configured app, ready to serve +// requests. The app only implements ServeHTTP and can be wrapped in other +// handlers accordingly. +func NewApp(configuration configuration.Configuration) *App { + app := &App{ + Config: configuration, + router: v2APIRouter(), + } + + // Register the handler dispatchers. + app.register(routeNameImageManifest, imageManifestDispatcher) + app.register(routeNameLayer, layerDispatcher) + app.register(routeNameTags, tagsDispatcher) + app.register(routeNameLayerUpload, layerUploadDispatcher) + app.register(routeNameLayerUploadResume, layerUploadDispatcher) + + return app +} + +func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { + app.router.ServeHTTP(w, r) +} + +// register a handler with the application, by route name. The handler will be +// passed through the application filters and context will be constructed at +// request time. +func (app *App) register(routeName string, dispatch dispatchFunc) { + + // TODO(stevvooe): This odd dispatcher/route registration is by-product of + // some limitations in the gorilla/mux router. We are using it to keep + // routing consistent between the client and server, but we may want to + // replace it with manual routing and structure-based dispatch for better + // control over the request execution. + + app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) +} + +// dispatchFunc takes a context and request and returns a constructed handler +// for the route. The dispatcher will use this to dynamically create request +// specific handlers for each endpoint without creating a new router for each +// request. +type dispatchFunc func(ctx *Context, r *http.Request) http.Handler + +// TODO(stevvooe): dispatchers should probably have some validation error +// chain with proper error reporting. + +// dispatcher returns a handler that constructs a request specific context and +// handler, using the dispatch factory function. +func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + context := &Context{ + App: app, + Name: vars["name"], + } + + // Store vars for underlying handlers. 
+ context.vars = vars + + context.log = log.WithField("name", context.Name) + handler := dispatch(context, r) + + context.log.Infoln("handler", resolveHandlerName(r.Method, handler)) + handler.ServeHTTP(w, r) + + // Automated error response handling here. Handlers may return their + // own errors if they need different behavior (such as range errors + // for layer upload). + if len(context.Errors.Errors) > 0 { + w.WriteHeader(http.StatusBadRequest) + serveJSON(w, context.Errors) + } + }) +} diff --git a/app_test.go b/app_test.go new file mode 100644 index 00000000..43b001ec --- /dev/null +++ b/app_test.go @@ -0,0 +1,127 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker-registry/configuration" +) + +// TestAppDispatcher builds an application with a test dispatcher and ensures +// that requests are properly dispatched and the handlers are constructed. +// This only tests the dispatch mechanism. The underlying dispatchers must be +// tested individually. +func TestAppDispatcher(t *testing.T) { + app := &App{ + Config: configuration.Configuration{}, + router: v2APIRouter(), + } + server := httptest.NewServer(app) + router := v2APIRouter() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { + return func(ctx *Context, r *http.Request) http.Handler { + // Always checks the same name context + if ctx.Name != ctx.vars["name"] { + t.Fatalf("unexpected name: %q != %q", ctx.Name, "foo/bar") + } + + // Check that we have all that is expected + for expectedK, expectedV := range expectedVars { + if ctx.vars[expectedK] != expectedV { + t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.vars[expectedK], expectedV) + } + } + + // Check that we only have variables that are expected + for k, v := range ctx.vars { + _, ok := expectedVars[k] + + if !ok { // name is checked on context + // We have an unexpected key, fail + t.Fatalf("unexpected key %q in vars with value %q", k, v) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + } + } + + // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string + unflatten := func(vars []string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(vars)-1; i = i + 2 { + m[vars[i]] = vars[i+1] + } + + return m + } + + for _, testcase := range []struct { + endpoint string + vars []string + }{ + { + endpoint: routeNameImageManifest, + vars: []string{ + "name", "foo/bar", + "tag", "sometag", + }, + }, + { + endpoint: routeNameTags, + vars: []string{ + "name", "foo/bar", + }, + }, + { + endpoint: routeNameLayer, + vars: []string{ + "name", "foo/bar", + "tarsum", "thetarsum", + }, + }, + { + endpoint: routeNameLayerUpload, + vars: []string{ + "name", "foo/bar", + "tarsum", "thetarsum", + }, + }, + { + endpoint: routeNameLayerUploadResume, + vars: []string{ + "name", "foo/bar", + "tarsum", "thetarsum", + "uuid", "theuuid", + }, + }, + } { + app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) + route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) + u, err := route.URL(testcase.vars...) 
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		resp, err := http.Get(u.String())
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK)
+		}
+	}
+}
diff --git a/context.go b/context.go
new file mode 100644
index 00000000..a5706b4e
--- /dev/null
+++ b/context.go
@@ -0,0 +1,34 @@
+package registry
+
+import (
+	"github.com/Sirupsen/logrus"
+)
+
+// Context should contain the request specific context for use across
+// handlers. Resources that don't need to be shared across handlers should not
+// be on this object.
+type Context struct {
+	// App points to the application structure that created this context.
+	*App
+
+	// Name is the prefix for the current request. Corresponds to the
+	// namespace/repository associated with the image.
+	Name string
+
+	// Errors is a collection of errors encountered during the request to be
+	// returned to the client API. If errors are added to the collection, the
+	// handler *must not* start the response via http.ResponseWriter.
+	Errors Errors
+
+	// TODO(stevvooe): Context would be a good place to create a
+	// representation of the "authorized resource". Perhaps, rather than
+	// having fields like "name", the context should be a set of parameters
+	// then we do routing from there.
+
+	// vars contains the extracted gorilla/mux variables that can be used for
+	// assignment.
+	vars map[string]string
+
+	// log provides a context specific logger.
+	log *logrus.Entry
+}
diff --git a/helpers.go b/helpers.go
new file mode 100644
index 00000000..b3b9d744
--- /dev/null
+++ b/helpers.go
@@ -0,0 +1,20 @@
+package registry
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// serveJSON marshals v and sets the content-type header to
+// 'application/json'. If a different status code is required, call
+// ResponseWriter.WriteHeader before this function.
+func serveJSON(w http.ResponseWriter, v interface{}) error {
+	w.Header().Set("Content-Type", "application/json")
+	enc := json.NewEncoder(w)
+
+	if err := enc.Encode(v); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/images.go b/images.go
new file mode 100644
index 00000000..f16a3560
--- /dev/null
+++ b/images.go
@@ -0,0 +1,46 @@
+package registry
+
+import (
+	"net/http"
+
+	"github.com/gorilla/handlers"
+)
+
+// imageManifestDispatcher takes the request context and builds the
+// appropriate handler for handling image manifest requests.
+func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {
+	imageManifestHandler := &imageManifestHandler{
+		Context: ctx,
+		Tag:     ctx.vars["tag"],
+	}
+
+	imageManifestHandler.log = imageManifestHandler.log.WithField("tag", imageManifestHandler.Tag)
+
+	return handlers.MethodHandler{
+		"GET":    http.HandlerFunc(imageManifestHandler.GetImageManifest),
+		"PUT":    http.HandlerFunc(imageManifestHandler.PutImageManifest),
+		"DELETE": http.HandlerFunc(imageManifestHandler.DeleteImageManifest),
+	}
+}
+
+// imageManifestHandler handles http operations on image manifests.
+type imageManifestHandler struct {
+	*Context
+
+	Tag string
+}
+
+// GetImageManifest fetches the image manifest from the storage backend, if it exists.
+func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
+
+}
+
+// PutImageManifest validates and stores an image in the registry.
+func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
+
+}
+
+// DeleteImageManifest removes the image with the given tag from the registry.
+func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
+
+}
diff --git a/layer.go b/layer.go
new file mode 100644
index 00000000..96920a8e
--- /dev/null
+++ b/layer.go
@@ -0,0 +1,34 @@
+package registry
+
+import (
+	"net/http"
+
+	"github.com/gorilla/handlers"
+)
+
+// layerDispatcher uses the request context to build a layerHandler.
+func layerDispatcher(ctx *Context, r *http.Request) http.Handler {
+	layerHandler := &layerHandler{
+		Context: ctx,
+		TarSum:  ctx.vars["tarsum"],
+	}
+
+	layerHandler.log = layerHandler.log.WithField("tarsum", layerHandler.TarSum)
+
+	return handlers.MethodHandler{
+		"GET": http.HandlerFunc(layerHandler.GetLayer),
+	}
+}
+
+// layerHandler serves http layer requests.
+type layerHandler struct {
+	*Context
+
+	TarSum string
+}
+
+// GetLayer fetches the binary data from backend storage and returns it in the
+// response.
+func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
+
+}
diff --git a/layerupload.go b/layerupload.go
new file mode 100644
index 00000000..3eb2ff9a
--- /dev/null
+++ b/layerupload.go
@@ -0,0 +1,63 @@
+package registry
+
+import (
+	"net/http"
+
+	"github.com/gorilla/handlers"
+)
+
+// layerUploadDispatcher constructs and returns the layer upload handler for
+// the given request context.
+func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
+	layerUploadHandler := &layerUploadHandler{
+		Context: ctx,
+		TarSum:  ctx.vars["tarsum"],
+		UUID:    ctx.vars["uuid"],
+	}
+
+	layerUploadHandler.log = layerUploadHandler.log.WithField("tarsum", layerUploadHandler.TarSum)
+
+	if layerUploadHandler.UUID != "" {
+		layerUploadHandler.log = layerUploadHandler.log.WithField("uuid", layerUploadHandler.UUID)
+	}
+
+	return handlers.MethodHandler{
+		"POST":   http.HandlerFunc(layerUploadHandler.StartLayerUpload),
+		"GET":    http.HandlerFunc(layerUploadHandler.GetUploadStatus),
+		"PUT":    http.HandlerFunc(layerUploadHandler.PutLayerChunk),
+		"DELETE": http.HandlerFunc(layerUploadHandler.CancelLayerUpload),
+	}
+}
+
+// layerUploadHandler handles the http layer upload process.
+type layerUploadHandler struct {
+	*Context
+
+	// TarSum is the unique identifier of the layer being uploaded.
+	TarSum string
+
+	// UUID identifies the upload instance for the current request.
+	UUID string
+}
+
+// StartLayerUpload begins the layer upload process and allocates a server-
+// side upload session.
+func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) {
+
+}
+
+// GetUploadStatus returns the status of a given upload, identified by uuid.
+func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
+
+}
+
+// PutLayerChunk receives a layer chunk during the layer upload process,
+// possibly completing the upload with a checksum and length.
+func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) {
+
+}
+
+// CancelLayerUpload cancels an in-progress upload of a layer.
+func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) {
+
+}
diff --git a/tags.go b/tags.go
new file mode 100644
index 00000000..d8cea3d3
--- /dev/null
+++ b/tags.go
@@ -0,0 +1,28 @@
+package registry
+
+import (
+	"net/http"
+
+	"github.com/gorilla/handlers"
+)
+
+// tagsDispatcher constructs the tags handler api endpoint.
+func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { + tagsHandler := &tagsHandler{ + Context: ctx, + } + + return handlers.MethodHandler{ + "GET": http.HandlerFunc(tagsHandler.GetTags), + } +} + +// tagsHandler handles requests for lists of tags under a repository name. +type tagsHandler struct { + *Context +} + +// GetTags returns a json list of tags for a specific image name. +func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { + // TODO(stevvooe): Implement this method. +} diff --git a/util.go b/util.go new file mode 100644 index 00000000..976ddf31 --- /dev/null +++ b/util.go @@ -0,0 +1,27 @@ +package registry + +import ( + "net/http" + "reflect" + "runtime" + + "github.com/gorilla/handlers" +) + +// functionName returns the name of the function fn. +func functionName(fn interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +// resolveHandlerName attempts to resolve a nice, pretty name for the passed +// in handler. +func resolveHandlerName(method string, handler http.Handler) string { + switch v := handler.(type) { + case handlers.MethodHandler: + return functionName(v[method]) + case http.HandlerFunc: + return functionName(v) + default: + return functionName(handler.ServeHTTP) + } +} From 31df62064d58c3702730489b14f06add80d23597 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 6 Nov 2014 14:06:16 -0800 Subject: [PATCH 028/165] Adds logic for tracking ipc storage driver process status This allows requests to not hang if the child process exits --- storagedriver/ipc/client.go | 169 +++++++++++++++++++++++++++++------- 1 file changed, 138 insertions(+), 31 deletions(-) diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index ad4fe5ee..7f41081a 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -3,6 +3,7 @@ package ipc import ( "bytes" "encoding/json" + "fmt" "io" "io/ioutil" "net" @@ -15,23 +16,29 @@ import ( "github.com/docker/libchan/spdy" ) -// StorageDriverExecutablePrefix is the prefix which the IPC storage driver loader expects driver -// executables to begin with. For example, the s3 driver should be named "registry-storage-s3". +// StorageDriverExecutablePrefix is the prefix which the IPC storage driver +// loader expects driver executables to begin with. For example, the s3 driver +// should be named "registry-storagedriver-s3". 
const StorageDriverExecutablePrefix = "registry-storagedriver-" -// StorageDriverClient is a storagedriver.StorageDriver implementation using a managed child process -// communicating over IPC using libchan with a unix domain socket +// StorageDriverClient is a storagedriver.StorageDriver implementation using a +// managed child process communicating over IPC using libchan with a unix domain +// socket type StorageDriverClient struct { subprocess *exec.Cmd + exitChan chan error + exitErr error + stopChan chan struct{} socket *os.File transport *spdy.Transport sender libchan.Sender version storagedriver.Version } -// NewDriverClient constructs a new out-of-process storage driver using the driver name and -// configuration parameters -// A user must call Start on this driver client before remote method calls can be made +// NewDriverClient constructs a new out-of-process storage driver using the +// driver name and configuration parameters +// A user must call Start on this driver client before remote method calls can +// be made // // Looks for drivers in the following locations in order: // - Storage drivers directory (to be determined, yet not implemented) @@ -56,9 +63,13 @@ func NewDriverClient(name string, parameters map[string]string) (*StorageDriverC }, nil } -// Start starts the designated child process storage driver and binds a socket to this process for -// IPC method calls +// Start starts the designated child process storage driver and binds a socket +// to this process for IPC method calls func (driver *StorageDriverClient) Start() error { + driver.exitErr = nil + driver.exitChan = make(chan error) + driver.stopChan = make(chan struct{}) + fileDescriptors, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0) if err != nil { return err @@ -76,6 +87,8 @@ func (driver *StorageDriverClient) Start() error { return err } + go driver.handleSubprocessExit() + if err = childSocket.Close(); err != nil { driver.Stop() return err @@ -142,6 +155,10 @@ func (driver *StorageDriverClient) Stop() error { if driver.subprocess != nil { killErr = driver.subprocess.Process.Kill() } + if driver.stopChan != nil { + driver.stopChan <- struct{}{} + close(driver.stopChan) + } if closeSenderErr != nil { return closeSenderErr @@ -150,12 +167,17 @@ func (driver *StorageDriverClient) Stop() error { } else if closeSocketErr != nil { return closeSocketErr } + return killErr } // Implement the storagedriver.StorageDriver interface over IPC func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { + if err := driver.exited(); err != nil { + return nil, err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path} @@ -164,8 +186,8 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { return nil, err } - var response ReadStreamResponse - err = receiver.Receive(&response) + response := new(ReadStreamResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return nil, err } @@ -183,6 +205,10 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { } func (driver *StorageDriverClient) PutContent(path string, contents []byte) error { + if err := driver.exited(); err != nil { + return err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path, "Reader": ioutil.NopCloser(bytes.NewReader(contents))} @@ -191,8 +217,8 @@ func (driver *StorageDriverClient) PutContent(path string, contents []byte) erro return err } - var response WriteStreamResponse 
- err = receiver.Receive(&response) + response := new(WriteStreamResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return err } @@ -205,16 +231,19 @@ func (driver *StorageDriverClient) PutContent(path string, contents []byte) erro } func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.ReadCloser, error) { - receiver, remoteSender := libchan.Pipe() + if err := driver.exited(); err != nil { + return nil, err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path, "Offset": offset} err := driver.sender.Send(&Request{Type: "ReadStream", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return nil, err } - var response ReadStreamResponse - err = receiver.Receive(&response) + response := new(ReadStreamResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return nil, err } @@ -227,16 +256,19 @@ func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.Re } func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { - receiver, remoteSender := libchan.Pipe() + if err := driver.exited(); err != nil { + return err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": ioutil.NopCloser(reader)} err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return err } - var response WriteStreamResponse - err = receiver.Receive(&response) + response := new(WriteStreamResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return err } @@ -249,16 +281,19 @@ func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, } func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { - receiver, remoteSender := libchan.Pipe() + if err := driver.exited(); err != nil { + return 0, err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path} err := driver.sender.Send(&Request{Type: "CurrentSize", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return 0, err } - var response CurrentSizeResponse - err = receiver.Receive(&response) + response := new(CurrentSizeResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return 0, err } @@ -271,16 +306,19 @@ func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { } func (driver *StorageDriverClient) List(path string) ([]string, error) { - receiver, remoteSender := libchan.Pipe() + if err := driver.exited(); err != nil { + return nil, err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path} err := driver.sender.Send(&Request{Type: "List", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return nil, err } - var response ListResponse - err = receiver.Receive(&response) + response := new(ListResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return nil, err } @@ -293,16 +331,19 @@ func (driver *StorageDriverClient) List(path string) ([]string, error) { } func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error { - receiver, remoteSender := libchan.Pipe() + if err := driver.exited(); err != nil { + return err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"SourcePath": sourcePath, "DestPath": destPath} err := 
driver.sender.Send(&Request{Type: "Move", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return err } - var response MoveResponse - err = receiver.Receive(&response) + response := new(MoveResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return err } @@ -315,16 +356,19 @@ func (driver *StorageDriverClient) Move(sourcePath string, destPath string) erro } func (driver *StorageDriverClient) Delete(path string) error { - receiver, remoteSender := libchan.Pipe() + if err := driver.exited(); err != nil { + return err + } + receiver, remoteSender := libchan.Pipe() params := map[string]interface{}{"Path": path} err := driver.sender.Send(&Request{Type: "Delete", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return err } - var response DeleteResponse - err = receiver.Receive(&response) + response := new(DeleteResponse) + err = driver.receiveResponse(receiver, response) if err != nil { return err } @@ -335,3 +379,66 @@ func (driver *StorageDriverClient) Delete(path string) error { return nil } + +// handleSubprocessExit populates the exit channel until we have explicitly +// stopped the storage driver subprocess +// Requests can select on driver.exitChan and response receiving and not hang if +// the process exits +func (driver *StorageDriverClient) handleSubprocessExit() { + exitErr := driver.subprocess.Wait() + if exitErr == nil { + exitErr = fmt.Errorf("Storage driver subprocess already exited cleanly") + } else { + exitErr = fmt.Errorf("Storage driver subprocess exited with error: %s", exitErr) + } + + driver.exitErr = exitErr + + for { + select { + case driver.exitChan <- exitErr: + case <-driver.stopChan: + close(driver.exitChan) + return + } + } +} + +// receiveResponse populates the response value with the next result from the +// given receiver, or returns an error if receiving failed or the driver has +// stopped +func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error { + receiveChan := make(chan error, 1) + go func(receiveChan chan<- error) { + defer close(receiveChan) + receiveChan <- receiver.Receive(response) + }(receiveChan) + + var err error + var ok bool + select { + case err = <-receiveChan: + case err, ok = <-driver.exitChan: + go func(receiveChan <-chan error) { + <-receiveChan + }(receiveChan) + if !ok { + err = driver.exitErr + } + } + + return err +} + +// exited returns an exit error if the driver has exited or nil otherwise +func (driver *StorageDriverClient) exited() error { + select { + case err, ok := <-driver.exitChan: + if !ok { + return driver.exitErr + } + return err + default: + return nil + } +} From 53bd19b98f3bf58717d39fd8adb17acab45df89e Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 10 Nov 2014 19:58:10 -0800 Subject: [PATCH 029/165] Adds a low level registry http client interface and implementation --- client/client.go | 506 +++++++++++++++++++++++++++++++++++++++++++++++ errors.go | 95 ++++++++- images.go | 47 +++++ 3 files changed, 641 insertions(+), 7 deletions(-) create mode 100644 client/client.go diff --git a/client/client.go b/client/client.go new file mode 100644 index 00000000..1270e256 --- /dev/null +++ b/client/client.go @@ -0,0 +1,506 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strconv" + + "github.com/docker/docker-registry" +) + +// Client implements the client interface to the registry http api +type Client interface { + // GetImageManifest returns an 
image manifest for the image at the given
+	// name, tag pair
+	GetImageManifest(name, tag string) (*registry.ImageManifest, error)
+
+	// PutImageManifest uploads an image manifest for the image at the given
+	// name, tag pair
+	PutImageManifest(name, tag string, imageManifest *registry.ImageManifest) error
+
+	// DeleteImage removes the image at the given name, tag pair
+	DeleteImage(name, tag string) error
+
+	// ListImageTags returns a list of all image tags with the given repository
+	// name
+	ListImageTags(name string) ([]string, error)
+
+	// GetImageLayer returns the image layer at the given name, tarsum pair in
+	// the form of an io.ReadCloser with the length of this layer
+	// A nonzero byteOffset can be provided to receive a partial layer beginning
+	// at the given offset
+	GetImageLayer(name, tarsum string, byteOffset int) (io.ReadCloser, int, error)
+
+	// InitiateLayerUpload starts an image upload for the given name, tarsum
+	// pair and returns a unique location url to use for other layer upload
+	// methods
+	// Returns a *registry.LayerAlreadyExistsError if the layer already exists
+	// on the registry
+	InitiateLayerUpload(name, tarsum string) (string, error)
+
+	// GetLayerUploadStatus returns the byte offset and length of the layer at
+	// the given upload location
+	GetLayerUploadStatus(location string) (int, int, error)
+
+	// UploadLayer uploads a full image layer to the registry
+	UploadLayer(location string, layer io.ReadCloser, length int, checksum *registry.Checksum) error
+
+	// UploadLayerChunk uploads a layer chunk with a given length and startByte
+	// to the registry
+	// FinishChunkedLayerUpload must be called to finalize this upload
+	UploadLayerChunk(location string, layerChunk io.ReadCloser, length, startByte int) error
+
+	// FinishChunkedLayerUpload completes a chunked layer upload at a given
+	// location
+	FinishChunkedLayerUpload(location string, length int, checksum *registry.Checksum) error
+
+	// CancelLayerUpload deletes all content at the unfinished layer upload
+	// location and invalidates any future calls to this layer upload
+	CancelLayerUpload(location string) error
+}
+
+// New returns a new Client which operates against a registry with the
+// given base endpoint
+// This endpoint should not include /v2/ or any part of the url after this
+func New(endpoint string) Client {
+	return &clientImpl{endpoint}
+}
+
+// clientImpl is the default implementation of the Client interface
+type clientImpl struct {
+	Endpoint string
+}
+
+// TODO(bbland): use consistent route generation between server and client
+
+func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest, error) {
+	response, err := http.Get(r.imageManifestUrl(name, tag))
+	if err != nil {
+		return nil, err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusOK:
+		break
+	case response.StatusCode == http.StatusNotFound:
+		return nil, &registry.ImageManifestNotFoundError{name, tag}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return nil, err
+		}
+		return nil, errors
+	default:
+		return nil, &registry.UnexpectedHttpStatusError{response.Status}
+	}
+
+	decoder := json.NewDecoder(response.Body)
+
+	manifest := new(registry.ImageManifest)
+	err = decoder.Decode(manifest)
+	if err != nil {
+		return nil, err
+	}
+	return
 manifest, nil
+}
+
+func (r *clientImpl) PutImageManifest(name, tag string, manifest *registry.ImageManifest) error {
+	manifestBytes, err := json.Marshal(manifest)
+	if err != nil {
+		return err
+	}
+
+	putRequest, err := http.NewRequest("PUT",
+		r.imageManifestUrl(name, tag), bytes.NewReader(manifestBytes))
+	if err != nil {
+		return err
+	}
+
+	response, err := http.DefaultClient.Do(putRequest)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusOK:
+		return nil
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return err
+		}
+		return errors
+	default:
+		return &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) DeleteImage(name, tag string) error {
+	deleteRequest, err := http.NewRequest("DELETE",
+		r.imageManifestUrl(name, tag), nil)
+	if err != nil {
+		return err
+	}
+
+	response, err := http.DefaultClient.Do(deleteRequest)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusNoContent:
+		break
+	case response.StatusCode == http.StatusNotFound:
+		return &registry.ImageManifestNotFoundError{name, tag}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return err
+		}
+		return errors
+	default:
+		return &registry.UnexpectedHttpStatusError{response.Status}
+	}
+
+	return nil
+}
+
+func (r *clientImpl) ListImageTags(name string) ([]string, error) {
+	response, err := http.Get(fmt.Sprintf("%s/v2/%s/tags", r.Endpoint, name))
+	if err != nil {
+		return nil, err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusOK:
+		break
+	case response.StatusCode == http.StatusNotFound:
+		return nil, &registry.RepositoryNotFoundError{name}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return nil, err
+		}
+		return nil, errors
+	default:
+		return nil, &registry.UnexpectedHttpStatusError{response.Status}
+	}
+
+	tags := struct {
+		Tags []string `json:"tags"`
+	}{}
+
+	decoder := json.NewDecoder(response.Body)
+	err = decoder.Decode(&tags)
+	if err != nil {
+		return nil, err
+	}
+
+	return tags.Tags, nil
+}
+
+func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.ReadCloser, int, error) {
+	getRequest, err := http.NewRequest("GET",
+		fmt.Sprintf("%s/v2/%s/layer/%s", r.Endpoint, name, tarsum), nil)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	getRequest.Header.Add("Range", fmt.Sprintf("%d-", byteOffset))
+	response, err := http.DefaultClient.Do(getRequest)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if response.StatusCode == http.StatusNotFound {
+		return nil, 0, &registry.LayerNotFoundError{name, tarsum}
+	}
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusOK:
+		lengthHeader := response.Header.Get("Content-Length")
+		length, err := strconv.ParseInt(lengthHeader, 10, 0)
+		if err != nil {
+			return nil, 0, err
+		}
+		return
 response.Body, int(length), nil
+	case response.StatusCode == http.StatusNotFound:
+		response.Body.Close()
+		return nil, 0, &registry.LayerNotFoundError{name, tarsum}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return nil, 0, err
+		}
+		return nil, 0, errors
+	default:
+		response.Body.Close()
+		return nil, 0, &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) {
+	postRequest, err := http.NewRequest("POST",
+		fmt.Sprintf("%s/v2/%s/layer/%s/upload", r.Endpoint, name, tarsum), nil)
+	if err != nil {
+		return "", err
+	}
+
+	response, err := http.DefaultClient.Do(postRequest)
+	if err != nil {
+		return "", err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusAccepted:
+		return response.Header.Get("Location"), nil
+	case response.StatusCode == http.StatusNotModified:
+		return "", &registry.LayerAlreadyExistsError{name, tarsum}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return "", err
+		}
+		return "", errors
+	default:
+		return "", &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, error) {
+	response, err := http.Get(fmt.Sprintf("%s%s", r.Endpoint, location))
+	if err != nil {
+		return 0, 0, err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusNoContent:
+		return parseRangeHeader(response.Header.Get("Range"))
+	case response.StatusCode == http.StatusNotFound:
+		return 0, 0, &registry.LayerUploadNotFoundError{location}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return 0, 0, err
+		}
+		return 0, 0, errors
+	default:
+		return 0, 0, &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length int, checksum *registry.Checksum) error {
+	defer layer.Close()
+
+	putRequest, err := http.NewRequest("PUT",
+		fmt.Sprintf("%s%s", r.Endpoint, location), layer)
+	if err != nil {
+		return err
+	}
+
+	queryValues := new(url.Values)
+	queryValues.Set("length", fmt.Sprint(length))
+	queryValues.Set(checksum.HashAlgorithm, checksum.Sum)
+	putRequest.URL.RawQuery = queryValues.Encode()
+
+	putRequest.Header.Set("Content-Type", "application/octet-stream")
+	putRequest.Header.Set("Content-Length", fmt.Sprint(length))
+
+	response, err := http.DefaultClient.Do(putRequest)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusCreated:
+		return nil
+	case response.StatusCode == http.StatusNotFound:
+		return &registry.LayerUploadNotFoundError{location}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return err
+		}
+		return errors
+	default:
+		return
 &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) UploadLayerChunk(location string, layerChunk io.ReadCloser, length, startByte int) error {
+	defer layerChunk.Close()
+
+	putRequest, err := http.NewRequest("PUT",
+		fmt.Sprintf("%s%s", r.Endpoint, location), layerChunk)
+	if err != nil {
+		return err
+	}
+
+	endByte := startByte + length
+
+	putRequest.Header.Set("Content-Type", "application/octet-stream")
+	putRequest.Header.Set("Content-Length", fmt.Sprint(length))
+	putRequest.Header.Set("Content-Range",
+		fmt.Sprintf("%d-%d/%d", startByte, endByte, endByte))
+
+	response, err := http.DefaultClient.Do(putRequest)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusAccepted:
+		return nil
+	case response.StatusCode == http.StatusRequestedRangeNotSatisfiable:
+		lastValidRange, layerSize, err := parseRangeHeader(response.Header.Get("Range"))
+		if err != nil {
+			return err
+		}
+		return &registry.LayerUploadInvalidRangeError{location, lastValidRange, layerSize}
+	case response.StatusCode == http.StatusNotFound:
+		return &registry.LayerUploadNotFoundError{location}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return err
+		}
+		return errors
+	default:
+		return &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, checksum *registry.Checksum) error {
+	putRequest, err := http.NewRequest("PUT",
+		fmt.Sprintf("%s%s", r.Endpoint, location), nil)
+	if err != nil {
+		return err
+	}
+
+	queryValues := new(url.Values)
+	queryValues.Set("length", fmt.Sprint(length))
+	queryValues.Set(checksum.HashAlgorithm, checksum.Sum)
+	putRequest.URL.RawQuery = queryValues.Encode()
+
+	putRequest.Header.Set("Content-Type", "application/octet-stream")
+	putRequest.Header.Set("Content-Length", "0")
+	putRequest.Header.Set("Content-Range",
+		fmt.Sprintf("%d-%d/%d", length, length, length))
+
+	response, err := http.DefaultClient.Do(putRequest)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusCreated:
+		return nil
+	case response.StatusCode == http.StatusNotFound:
+		return &registry.LayerUploadNotFoundError{location}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err = decoder.Decode(&errors)
+		if err != nil {
+			return err
+		}
+		return errors
+	default:
+		return &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+func (r *clientImpl) CancelLayerUpload(location string) error {
+	deleteRequest, err := http.NewRequest("DELETE",
+		fmt.Sprintf("%s%s", r.Endpoint, location), nil)
+	if err != nil {
+		return err
+	}
+
+	response, err := http.DefaultClient.Do(deleteRequest)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// TODO(bbland): handle other status codes, like 5xx errors
+	switch {
+	case response.StatusCode == http.StatusNoContent:
+		return nil
+	case response.StatusCode == http.StatusNotFound:
+		return &registry.LayerUploadNotFoundError{location}
+	case response.StatusCode >= 400 && response.StatusCode < 500:
+		errors := new(registry.Errors)
+		decoder := json.NewDecoder(response.Body)
+		err =
 decoder.Decode(&errors)
+		if err != nil {
+			return err
+		}
+		return errors
+	default:
+		return &registry.UnexpectedHttpStatusError{response.Status}
+	}
+}
+
+// imageManifestUrl is a helper method for returning the full url to an image
+// manifest
+func (r *clientImpl) imageManifestUrl(name, tag string) string {
+	return fmt.Sprintf("%s/v2/%s/image/%s", r.Endpoint, name, tag)
+}
+
+// parseRangeHeader parses out the offset and length from a returned Range
+// header
+func parseRangeHeader(byteRangeHeader string) (int, int, error) {
+	r := regexp.MustCompile("bytes=0-(\\d+)/(\\d+)")
+	submatches := r.FindStringSubmatch(byteRangeHeader)
+	offset, err := strconv.ParseInt(submatches[1], 10, 0)
+	if err != nil {
+		return 0, 0, err
+	}
+	length, err := strconv.ParseInt(submatches[2], 10, 0)
+	if err != nil {
+		return 0, 0, err
+	}
+	return int(offset), int(length), nil
+}
diff --git a/errors.go b/errors.go
index 53dcb6bf..29cfdd70 100644
--- a/errors.go
+++ b/errors.go
@@ -162,16 +162,97 @@ func (errs *Errors) Error() string {
 	}
 }
 
-// detailUnknownLayer provides detail for unknown layer errors, returned by
+// DetailUnknownLayer provides detail for unknown layer errors, returned by
 // image manifest push for layers that are not yet transferred. This is intended
 // to only be used on the backend to return detail for this specific error.
 type DetailUnknownLayer struct {
 
 	// Unknown should contain the contents of a layer descriptor, which is a
-	// single json object with the key "blobSum" currently.
-	Unknown struct {
-
-		// BlobSum contains the uniquely identifying tarsum of the layer.
-		BlobSum string `json:"blobSum"`
-	} `json:"unknown"`
+	// single FSLayer currently.
+	Unknown FSLayer `json:"unknown"`
+}
+
+// RepositoryNotFoundError is returned when making an operation against a
+// repository that does not exist in the registry
+type RepositoryNotFoundError struct {
+	Name string
+}
+
+func (e *RepositoryNotFoundError) Error() string {
+	return fmt.Sprintf("No repository found with Name: %s", e.Name)
+}
+
+// ImageManifestNotFoundError is returned when making an operation against a
+// given image manifest that does not exist in the registry
+type ImageManifestNotFoundError struct {
+	Name string
+	Tag  string
+}
+
+func (e *ImageManifestNotFoundError) Error() string {
+	return fmt.Sprintf("No manifest found with Name: %s, Tag: %s",
+		e.Name, e.Tag)
+}
+
+// LayerAlreadyExistsError is returned when attempting to create a new layer
+// that already exists in the registry
+type LayerAlreadyExistsError struct {
+	Name   string
+	TarSum string
+}
+
+func (e *LayerAlreadyExistsError) Error() string {
+	return fmt.Sprintf("Layer already found with Name: %s, TarSum: %s",
+		e.Name, e.TarSum)
+}
+
+// LayerNotFoundError is returned when making an operation against a given image
+// layer that does not exist in the registry
+type LayerNotFoundError struct {
+	Name   string
+	TarSum string
+}
+
+func (e *LayerNotFoundError) Error() string {
+	return fmt.Sprintf("No layer found with Name: %s, TarSum: %s",
+		e.Name, e.TarSum)
+}
+
+// LayerUploadNotFoundError is returned when making a layer upload operation
+// against an invalid layer upload location url
+// This may be the result of using a cancelled, completed, or stale upload
+// location
+type LayerUploadNotFoundError struct {
+	Location string
+}
+
+func (e *LayerUploadNotFoundError) Error() string {
+	return fmt.Sprintf("No layer upload found at Location: %s",
+		e.Location)
+}
+
+// LayerUploadInvalidRangeError is returned when attempting to upload an 
image +// layer chunk that is out of order +// This provides the known LayerSize and LastValidRange which can be used to +// resume the upload +type LayerUploadInvalidRangeError struct { + Location string + LastValidRange int + LayerSize int +} + +func (e *LayerUploadInvalidRangeError) Error() string { + return fmt.Sprintf( + "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Layer Size: %d", + e.Location, e.LastValidRange, e.LayerSize) +} + +// UnexpectedHttpStatusError is returned when an unexpected http status is +// returned when making a registry api call +type UnexpectedHttpStatusError struct { + Status string +} + +func (e *UnexpectedHttpStatusError) Error() string { + return fmt.Sprintf("Received unexpected http status: %s", e.Status) } diff --git a/images.go b/images.go index f16a3560..927a4b60 100644 --- a/images.go +++ b/images.go @@ -6,6 +6,53 @@ import ( "github.com/gorilla/handlers" ) +// ImageManifest defines the structure of an image manifest +type ImageManifest struct { + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []ManifestHistory `json:"history"` + + // Signature is the JWT with which the image is signed + Signature string `json:"signature,omitempty"` + + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum string `json:"blobSum"` +} + +// ManifestHistory stores unstructured v1 compatibility information +type ManifestHistory struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Checksum is a container struct for an image checksum +type Checksum struct { + // HashAlgorithm is the algorithm used to compute the checksum + // Supported values: md5, sha1, sha256, sha512 + HashAlgorithm string + + // Sum is the actual checksum value for the given HashAlgorithm + Sum string +} + // imageManifestDispatcher takes the request context and builds the // appropriate handler for handling image manifest requests. 
func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler {

From b25e16a56ce7fb6decc81909263a93e438f2b064 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Wed, 12 Nov 2014 15:26:35 -0800
Subject: [PATCH 030/165] Adds Raw bytes field to ImageManifest

This can be used for proper json signature validation

---
 images.go | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/images.go b/images.go
index 927a4b60..84fe217d 100644
--- a/images.go
+++ b/images.go
@@ -1,6 +1,7 @@
 package registry
 
 import (
+	"encoding/json"
 	"net/http"
 
 	"github.com/gorilla/handlers"
@@ -24,11 +25,27 @@ type ImageManifest struct {
 	// History is a list of unstructured historical data for v1 compatibility
 	History []ManifestHistory `json:"history"`
 
-	// Signature is the JWT with which the image is signed
-	Signature string `json:"signature,omitempty"`
-
 	// SchemaVersion is the image manifest schema that this image follows
 	SchemaVersion int `json:"schemaVersion"`
+
+	// Raw is the byte representation of the ImageManifest, used for signature
+	// verification
+	Raw []byte `json:"-"`
+}
+
+// imageManifest is used to avoid recursion in unmarshaling
+type imageManifest ImageManifest
+
+func (m *ImageManifest) UnmarshalJSON(b []byte) error {
+	var manifest imageManifest
+	err := json.Unmarshal(b, &manifest)
+	if err != nil {
+		return err
+	}
+
+	*m = ImageManifest(manifest)
+	m.Raw = b
+	return nil
 }
 
 // FSLayer is a container struct for BlobSums defined in an image manifest

From 375f3cc1365dd0f22553e85c973a538cb7453f58 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 12 Nov 2014 16:39:35 -0800
Subject: [PATCH 031/165] Define common regexps used across registry
 application

This commit adds regular expression definitions for several string
identifiers used throughout the registry. The repository name regex supports
up to five path components and restricts repeated periods, dashes and
underscores. The tag regex simply validates the length of the tag and that
printable characters are required.

Though we define a new package common, these definitions should land in
docker core.

---
 common/names.go       | 19 +++++++++++
 common/names_test.go  | 62 +++++++++++++++++++++++++++++++++
 common/tarsum.go      | 70 ++++++++++++++++++++++++++++++++++++++
 common/tarsum_test.go | 79 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 230 insertions(+)
 create mode 100644 common/names.go
 create mode 100644 common/names_test.go
 create mode 100644 common/tarsum.go
 create mode 100644 common/tarsum_test.go

diff --git a/common/names.go b/common/names.go
new file mode 100644
index 00000000..ce25e487
--- /dev/null
+++ b/common/names.go
@@ -0,0 +1,19 @@
+package common
+
+import (
+	"regexp"
+)
+
+// RepositoryNameComponentRegexp restricts registry path component names to
+// start with at least two letters or numbers, with following parts able to
+// be separated by one period, dash or underscore.
+var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9](?:[a-z0-9]+[._-]?)*[a-z0-9]`)
+
+// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 2 to
+// 5 path components, separated by a forward slash.
+var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){1,4}` + RepositoryNameComponentRegexp.String())
+
+// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go.
+var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
+
+// TODO(stevvooe): Contribute these exports back to core, so they are shared.
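For a sense of how the expressions above behave, here is a minimal sketch (a
hypothetical main package, not part of this patch) that exercises
RepositoryNameRegexp against a few of the names covered by the tests that
follow. Matching here is unanchored, exactly as in those tests:

	package main

	import (
		"fmt"

		"github.com/docker/docker-registry/common"
	)

	func main() {
		// Unanchored matching, mirroring names_test.go; the router wraps
		// these expressions with surrounding path context.
		for _, name := range []string{"library/ubuntu", "blog.foo.com/bar/baz", "asdf"} {
			fmt.Printf("%q matches: %v\n", name, common.RepositoryNameRegexp.MatchString(name))
		}
		// Prints true, true, false: "asdf" lacks a second path component.
	}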
diff --git a/common/names_test.go b/common/names_test.go
new file mode 100644
index 00000000..17655984
--- /dev/null
+++ b/common/names_test.go
@@ -0,0 +1,62 @@
+package common
+
+import (
+	"testing"
+)
+
+func TestRepositoryNameRegexp(t *testing.T) {
+	for _, testcase := range []struct {
+		input string
+		valid bool
+	}{
+		{
+			input: "simple/name",
+			valid: true,
+		},
+		{
+			input: "library/ubuntu",
+			valid: true,
+		},
+		{
+			input: "docker/stevvooe/app",
+			valid: true,
+		},
+		{
+			input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
+			valid: true,
+		},
+		{
+			input: "a/a/a/a/a/a/b/b/b/b",
+			valid: false,
+		},
+		{
+			input: "a/a/a/a/",
+			valid: false,
+		},
+		{
+			input: "foo.com/bar/baz",
+			valid: true,
+		},
+		{
+			input: "blog.foo.com/bar/baz",
+			valid: true,
+		},
+		{
+			input: "asdf",
+			valid: false,
+		},
+		{
+			input: "asdf$$^/",
+			valid: false,
+		},
+	} {
+		if RepositoryNameRegexp.MatchString(testcase.input) != testcase.valid {
+			status := "invalid"
+			if testcase.valid {
+				status = "valid"
+			}
+
+			t.Fatalf("expected %q to be %s repository name", testcase.input, status)
+		}
+	}
+}
diff --git a/common/tarsum.go b/common/tarsum.go
new file mode 100644
index 00000000..5a6e7d21
--- /dev/null
+++ b/common/tarsum.go
@@ -0,0 +1,70 @@
+package common
+
+import (
+	"fmt"
+
+	"regexp"
+)
+
+// TarsumRegexp defines a regular expression to match tarsum identifiers.
+var TarsumRegexp = regexp.MustCompile("tarsum(?:\\.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")
+
+// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
+// capture groups corresponding to each component.
+var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(\\.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
+
+// TarSumInfo contains information about a parsed tarsum.
+type TarSumInfo struct {
+	// Version contains the version of the tarsum.
+	Version string
+
+	// Algorithm contains the algorithm for the final digest
+	Algorithm string
+
+	// Digest contains the hex-encoded digest.
+	Digest string
+}
+
+// InvalidTarSumError is returned when a tarsum string fails to parse.
+type InvalidTarSumError struct {
+	TarSum string
+}
+
+func (e InvalidTarSumError) Error() string {
+	return fmt.Sprintf("invalid tarsum: %q", e.TarSum)
+}
+
+// ParseTarSum parses a tarsum string into its components of interest. For
+// example, this method may receive the tarsum in the following format:
+//
+//	tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e
+//
+// The function will return the following:
+//
+//	TarSumInfo{
+//		Version: "v1",
+//		Algorithm: "sha256",
+//		Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
+//	}
+//
+func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) {
+	components := TarsumRegexpCapturing.FindStringSubmatch(tarSum)
+
+	if len(components) != 1+TarsumRegexpCapturing.NumSubexp() {
+		return TarSumInfo{}, InvalidTarSumError{TarSum: tarSum}
+	}
+
+	return TarSumInfo{
+		Version:   components[3],
+		Algorithm: components[4],
+		Digest:    components[5],
+	}, nil
+}
+
+// String returns the valid string representation of the tarsum info.
+func (tsi TarSumInfo) String() string {
+	if tsi.Version == "" {
+		return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest)
+	}
+
+	return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest)
+}
diff --git a/common/tarsum_test.go b/common/tarsum_test.go
new file mode 100644
index 00000000..e860c9cd
--- /dev/null
+++ b/common/tarsum_test.go
@@ -0,0 +1,79 @@
+package common
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestParseTarSumComponents(t *testing.T) {
+	for _, testcase := range []struct {
+		input    string
+		expected TarSumInfo
+		err      error
+	}{
+		{
+			input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
+			expected: TarSumInfo{
+				Version:   "v1",
+				Algorithm: "sha256",
+				Digest:    "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
+			},
+		},
+		{
+			input: "",
+			err:   InvalidTarSumError{},
+		},
+		{
+			input: "purejunk",
+			err:   InvalidTarSumError{TarSum: "purejunk"},
+		},
+		{
+			input: "tarsum.v23+test:12341234123412341effefefe",
+			expected: TarSumInfo{
+				Version:   "v23",
+				Algorithm: "test",
+				Digest:    "12341234123412341effefefe",
+			},
+		},
+
+		// The following test cases are ported from docker core
+		{
+			// Version 0 tarsum
+			input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
+			expected: TarSumInfo{
+				Algorithm: "sha256",
+				Digest:    "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
+			},
+		},
+		{
+			// Dev version tarsum
+			input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
+			expected: TarSumInfo{
+				Version:   "dev",
+				Algorithm: "sha256",
+				Digest:    "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
+			},
+		},
+	} {
+		tsi, err := ParseTarSum(testcase.input)
+		if err != nil {
+			if testcase.err != nil && err == testcase.err {
+				continue // passes
+			}
+
+			t.Fatalf("unexpected error parsing tarsum: %v", err)
+		}
+
+		if testcase.err != nil {
+			t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err)
+		}
+
+		if !reflect.DeepEqual(tsi, testcase.expected) {
+			t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected)
+		}
+
+		if testcase.input != tsi.String() {
+			t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input)
+		}
+	}
+}

From 145c89bb94e5bf1770097f31fecdb407067b628c Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 12 Nov 2014 16:59:50 -0800
Subject: [PATCH 032/165] Disambiguate routing for multi-level repository
 names

To be able to support multi-level repository names, the API has been
adjusted to disambiguate between tagged image manifest routes and tag list
routes. With this effort, the regular expressions have been defined in a
single place to reduce repetition and ensure that validation is consistent
across the registry.

The router was also refactored to remove the use of subrouters, simplifying
the route definition code. This also reduces the number of regular
expression match checks during the routing process.
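To make the disambiguation concrete, here is a minimal sketch (a hypothetical
main package, not part of this patch) that builds just the tag list route from
the shared regexps and matches a multi-level name against it:

	package main

	import (
		"fmt"
		"net/http"

		"github.com/docker/docker-registry/common"
		"github.com/gorilla/mux"
	)

	func main() {
		router := mux.NewRouter().StrictSlash(true)
		router.
			Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/tags/list").
			Name("tags")

		// The literal "/tags/list" suffix keeps the name expression from
		// swallowing the whole path, so "name" resolves to "foo/bar/image".
		req, _ := http.NewRequest("GET", "/v2/foo/bar/image/tags/list", nil)
		var match mux.RouteMatch
		if router.Match(req, &match) {
			fmt.Println(match.Vars["name"]) // foo/bar/image
		}
	}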
---
 app_test.go    |   6 +--
 routes.go      |  38 ++++++----------
 routes_test.go | 121 ++++++++++++++++++++++++++++++-------------------
 3 files changed, 90 insertions(+), 75 deletions(-)

diff --git a/app_test.go b/app_test.go
index 43b001ec..e0fa727f 100644
--- a/app_test.go
+++ b/app_test.go
@@ -87,21 +87,21 @@ func TestAppDispatcher(t *testing.T) {
 			endpoint: routeNameLayer,
 			vars: []string{
 				"name", "foo/bar",
-				"tarsum", "thetarsum",
+				"tarsum", "tarsum.v1+bogus:abcdef0123456789",
 			},
 		},
 		{
 			endpoint: routeNameLayerUpload,
 			vars: []string{
 				"name", "foo/bar",
-				"tarsum", "thetarsum",
+				"tarsum", "tarsum.v1+bogus:abcdef0123456789",
 			},
 		},
 		{
 			endpoint: routeNameLayerUploadResume,
 			vars: []string{
 				"name", "foo/bar",
-				"tarsum", "thetarsum",
+				"tarsum", "tarsum.v1+bogus:abcdef0123456789",
 				"uuid", "theuuid",
 			},
 		},
diff --git a/routes.go b/routes.go
index d4942696..8da7c3e2 100644
--- a/routes.go
+++ b/routes.go
@@ -1,12 +1,11 @@
 package registry
 
 import (
+	"github.com/docker/docker-registry/common"
 	"github.com/gorilla/mux"
 )
 
 const (
-	routeNameRoot          = "root"
-	routeNameName          = "name"
 	routeNameImageManifest = "image-manifest"
 	routeNameTags          = "tags"
 	routeNameLayer         = "layer"
@@ -25,47 +24,36 @@ var allEndpoints = []string{
 // v2APIRouter builds a gorilla router with named routes for the various API
 // methods. We may export this for use by the client.
 func v2APIRouter() *mux.Router {
-	router := mux.NewRouter()
-
-	rootRouter := router.
-		PathPrefix("/v2").
-		Name(routeNameRoot).
-		Subrouter()
-
-	// All routes are subordinate to named routes
-	namedRouter := rootRouter.
-		PathPrefix("/{name:[A-Za-z0-9-_]+/[A-Za-z0-9-_]+}"). // TODO(stevvooe): Verify this format with core
-		Name(routeNameName).
-		Subrouter().
+	router := mux.NewRouter().
 		StrictSlash(true)
 
 	// GET	/v2/<name>/image/<tag>	Image Manifest	Fetch the image manifest identified by name and tag.
 	// PUT	/v2/<name>/image/<tag>	Image Manifest	Upload the image manifest identified by name and tag.
 	// DELETE	/v2/<name>/image/<tag>	Image Manifest	Delete the image identified by name and tag.
-	namedRouter.
-		Path("/image/{tag:[A-Za-z0-9-_]+}").
+	router.
+		Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/image/{tag:" + common.TagNameRegexp.String() + "}").
 		Name(routeNameImageManifest)
 
-	// GET	/v2/<name>/tags	Tags	Fetch the tags under the repository identified by name.
-	namedRouter.
-		Path("/tags").
+	// GET	/v2/<name>/tags/list	Tags	Fetch the tags under the repository identified by name.
+	router.
+		Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/tags/list").
 		Name(routeNameTags)
 
 	// GET	/v2/<name>/layer/<tarsum>	Layer	Fetch the layer identified by tarsum.
-	namedRouter.
-		Path("/layer/{tarsum}").
+	router.
+		Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/layer/{tarsum:" + common.TarsumRegexp.String() + "}").
 		Name(routeNameLayer)
 
 	// POST	/v2/<name>/layer/<tarsum>/upload/	Layer Upload	Initiate an upload of the layer identified by tarsum. Requires length and a checksum parameter.
-	namedRouter.
-		Path("/layer/{tarsum}/upload/").
+	router.
+		Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/layer/{tarsum:" + common.TarsumRegexp.String() + "}/upload/").
 		Name(routeNameLayerUpload)
 
 	// GET	/v2/<name>/layer/<tarsum>/upload/<uuid>	Layer Upload	Get the status of the upload identified by tarsum and uuid.
 	// PUT	/v2/<name>/layer/<tarsum>/upload/<uuid>	Layer Upload	Upload all or a chunk of the upload identified by tarsum and uuid.
 	// DELETE	/v2/<name>/layer/<tarsum>/upload/<uuid>	Layer Upload	Cancel the upload identified by tarsum and uuid
-	namedRouter.
+	router.
+ Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/layer/{tarsum:" + common.TarsumRegexp.String() + "}/upload/{uuid}"). Name(routeNameLayerUploadResume) return router diff --git a/routes_test.go b/routes_test.go index e3ef371a..8907684a 100644 --- a/routes_test.go +++ b/routes_test.go @@ -10,9 +10,10 @@ import ( "github.com/gorilla/mux" ) -type routeInfo struct { +type routeTestCase struct { RequestURI string Vars map[string]string + RouteName string } // TestRouter registers a test handler with all the routes and ensures that @@ -26,14 +27,15 @@ func TestRouter(t *testing.T) { router := v2APIRouter() testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - routeInfo := routeInfo{ + testCase := routeTestCase{ RequestURI: r.RequestURI, Vars: mux.Vars(r), + RouteName: mux.CurrentRoute(r).GetName(), } enc := json.NewEncoder(w) - if err := enc.Encode(routeInfo); err != nil { + if err := enc.Encode(testCase); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -42,64 +44,81 @@ func TestRouter(t *testing.T) { // Startup test server server := httptest.NewServer(router) - for _, testcase := range []struct { - routeName string - expectedRouteInfo routeInfo - }{ + for _, testcase := range []routeTestCase{ { - routeName: routeNameImageManifest, - expectedRouteInfo: routeInfo{ - RequestURI: "/v2/foo/bar/image/tag", - Vars: map[string]string{ - "name": "foo/bar", - "tag": "tag", - }, + RouteName: routeNameImageManifest, + RequestURI: "/v2/foo/bar/image/tag", + Vars: map[string]string{ + "name": "foo/bar", + "tag": "tag", }, }, { - routeName: routeNameTags, - expectedRouteInfo: routeInfo{ - RequestURI: "/v2/foo/bar/tags", - Vars: map[string]string{ - "name": "foo/bar", - }, + RouteName: routeNameTags, + RequestURI: "/v2/foo/bar/tags/list", + Vars: map[string]string{ + "name": "foo/bar", }, }, { - routeName: routeNameLayer, - expectedRouteInfo: routeInfo{ - RequestURI: "/v2/foo/bar/layer/tarsum", - Vars: map[string]string{ - "name": "foo/bar", - "tarsum": "tarsum", - }, + RouteName: routeNameLayer, + RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum.dev+foo:abcdef0919234", }, }, { - routeName: routeNameLayerUpload, - expectedRouteInfo: routeInfo{ - RequestURI: "/v2/foo/bar/layer/tarsum/upload/", - Vars: map[string]string{ - "name": "foo/bar", - "tarsum": "tarsum", - }, + RouteName: routeNameLayerUpload, + RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234/upload/", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum.dev+foo:abcdef0919234", }, }, { - routeName: routeNameLayerUploadResume, - expectedRouteInfo: routeInfo{ - RequestURI: "/v2/foo/bar/layer/tarsum/upload/uuid", - Vars: map[string]string{ - "name": "foo/bar", - "tarsum": "tarsum", - "uuid": "uuid", - }, + RouteName: routeNameLayerUploadResume, + RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234/upload/uuid", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum.dev+foo:abcdef0919234", + "uuid": "uuid", + }, + }, + { + RouteName: routeNameLayerUploadResume, + RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum.dev+foo:abcdef0919234", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + }, + + { + // Check ambiguity: ensure we can distinguish between tags for + // "foo/bar/image/image" and image for "foo/bar/image" with tag + // 
"tags" + RouteName: routeNameImageManifest, + RequestURI: "/v2/foo/bar/image/image/tags", + Vars: map[string]string{ + "name": "foo/bar/image", + "tag": "tags", + }, + }, + { + // This case presents an ambiguity between foo/bar with tag="tags" + // and list tags for "foo/bar/image" + RouteName: routeNameTags, + RequestURI: "/v2/foo/bar/image/tags/list", + Vars: map[string]string{ + "name": "foo/bar/image", }, }, } { // Register the endpoint - router.GetRoute(testcase.routeName).Handler(testHandler) - u := server.URL + testcase.expectedRouteInfo.RequestURI + router.GetRoute(testcase.RouteName).Handler(testHandler) + u := server.URL + testcase.RequestURI resp, err := http.Get(u) @@ -107,15 +126,23 @@ func TestRouter(t *testing.T) { t.Fatalf("error issuing get request: %v", err) } + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) + } + dec := json.NewDecoder(resp.Body) - var actualRouteInfo routeInfo + var actualRouteInfo routeTestCase if err := dec.Decode(&actualRouteInfo); err != nil { t.Fatalf("error reading json response: %v", err) } - if !reflect.DeepEqual(actualRouteInfo, testcase.expectedRouteInfo) { - t.Fatalf("actual does not equal expected: %v != %v", actualRouteInfo, testcase.expectedRouteInfo) + if actualRouteInfo.RouteName != testcase.RouteName { + t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) + } + + if !reflect.DeepEqual(actualRouteInfo, testcase) { + t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } } From c8ea224f9c8e7257315bb58c31335e13f10bed7a Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 12 Nov 2014 17:19:19 -0800 Subject: [PATCH 033/165] Miscellaneous go vet fixes Fixes some format strings and uses keyed fields for struct construction --- configuration/configuration.go | 6 +++--- errors_test.go | 4 ++-- storagedriver/filesystem/filesystem.go | 10 +++++----- storagedriver/inmemory/inmemory.go | 10 +++++----- storagedriver/ipc/server.go | 1 - storagedriver/s3/s3.go | 12 +++++++----- 6 files changed, 22 insertions(+), 21 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index 901a2571..c9bb72e3 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -14,13 +14,13 @@ import ( // optionally modified by environment variables type Configuration struct { // Version is the version which defines the format of the rest of the configuration - Version Version `yaml:"version"` + Version Version `yaml:"version"` // Loglevel is the level at which registry operations are logged Loglevel Loglevel `yaml:"loglevel"` // Storage is the configuration for the registry's storage driver - Storage Storage `yaml:"storage"` + Storage Storage `yaml:"storage"` } // v_0_1_Configuration is a Version 0.1 Configuration struct @@ -162,7 +162,7 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements the yaml.Marshaler interface func (storage Storage) MarshalYAML() (interface{}, error) { - if storage.Parameters == nil { + if storage.Parameters() == nil { return storage.Type, nil } return map[string]Parameters(storage), nil diff --git a/errors_test.go b/errors_test.go index dc6a8de7..580754f5 100644 --- a/errors_test.go +++ b/errors_test.go @@ -14,7 +14,7 @@ func TestErrorCodes(t *testing.T) { } if ec.Message() != errorCodesMessages[ec] { - t.Fatalf("incorrect message for error code %v: %q != !q", ec, ec.Message(), 
errorCodesMessages[ec]) + t.Fatalf("incorrect message for error code %v: %q != %q", ec, ec.Message(), errorCodesMessages[ec]) } // Serialize the error code using the json library to ensure that we @@ -26,7 +26,7 @@ func TestErrorCodes(t *testing.T) { } if len(p) <= 0 { - t.Fatalf("expected content in marshaled before for error code %v: %v", ec) + t.Fatalf("expected content in marshaled before for error code %v", ec) } // First, unmarshal to interface and ensure we have a string. diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/filesystem.go index 2cca7890..96c78160 100644 --- a/storagedriver/filesystem/filesystem.go +++ b/storagedriver/filesystem/filesystem.go @@ -60,7 +60,7 @@ func (d *FilesystemDriver) subPath(subPath string) string { func (d *FilesystemDriver) GetContent(path string) ([]byte, error) { contents, err := ioutil.ReadFile(d.subPath(path)) if err != nil { - return nil, storagedriver.PathNotFoundError{path} + return nil, storagedriver.PathNotFoundError{Path: path} } return contents, nil } @@ -89,7 +89,7 @@ func (d *FilesystemDriver) ReadStream(path string, offset uint64) (io.ReadCloser return nil, err } else if seekPos < int64(offset) { file.Close() - return nil, storagedriver.InvalidOffsetError{path, offset} + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } return file, nil @@ -104,7 +104,7 @@ func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, read } if offset > resumableOffset { - return storagedriver.InvalidOffsetError{subPath, offset} + return storagedriver.InvalidOffsetError{Path: subPath, Offset: offset} } fullPath := d.subPath(subPath) @@ -161,7 +161,7 @@ func (d *FilesystemDriver) CurrentSize(subPath string) (uint64, error) { if err != nil && !os.IsNotExist(err) { return 0, err } else if err != nil { - return 0, storagedriver.PathNotFoundError{subPath} + return 0, storagedriver.PathNotFoundError{Path: subPath} } return uint64(fileInfo.Size()), nil } @@ -200,7 +200,7 @@ func (d *FilesystemDriver) Delete(subPath string) error { if err != nil && !os.IsNotExist(err) { return err } else if err != nil { - return storagedriver.PathNotFoundError{subPath} + return storagedriver.PathNotFoundError{Path: subPath} } err = os.RemoveAll(fullPath) diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/inmemory.go index fee39bc9..14590a3a 100644 --- a/storagedriver/inmemory/inmemory.go +++ b/storagedriver/inmemory/inmemory.go @@ -45,7 +45,7 @@ func (d *InMemoryDriver) GetContent(path string) ([]byte, error) { defer d.mutex.RUnlock() contents, ok := d.storage[path] if !ok { - return nil, storagedriver.PathNotFoundError{path} + return nil, storagedriver.PathNotFoundError{Path: path} } return contents, nil } @@ -64,7 +64,7 @@ func (d *InMemoryDriver) ReadStream(path string, offset uint64) (io.ReadCloser, if err != nil { return nil, err } else if len(contents) < int(offset) { - return nil, storagedriver.InvalidOffsetError{path, offset} + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } src := contents[offset:] @@ -84,7 +84,7 @@ func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io } if offset > resumableOffset { - return storagedriver.InvalidOffsetError{path, offset} + return storagedriver.InvalidOffsetError{Path: path, Offset: offset} } contents, err := ioutil.ReadAll(reader) @@ -138,7 +138,7 @@ func (d *InMemoryDriver) Move(sourcePath string, destPath string) error { defer d.mutex.Unlock() contents, ok := d.storage[sourcePath] if !ok { - 
return storagedriver.PathNotFoundError{sourcePath}
+		return storagedriver.PathNotFoundError{Path: sourcePath}
 	}
 	d.storage[destPath] = contents
 	delete(d.storage, sourcePath)
@@ -156,7 +156,7 @@ func (d *InMemoryDriver) Delete(path string) error {
 	}
 
 	if len(subPaths) == 0 {
-		return storagedriver.PathNotFoundError{path}
+		return storagedriver.PathNotFoundError{Path: path}
 	}
 
 	for _, subPath := range subPaths {
diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go
index 9d45c94e..f374bf06 100644
--- a/storagedriver/ipc/server.go
+++ b/storagedriver/ipc/server.go
@@ -38,7 +38,6 @@ func StorageDriverServer(driver storagedriver.StorageDriver) error {
 		}
 		go receive(driver, receiver)
 	}
-	return nil
 }
 }
diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go
index c932a1e1..7ef7df6b 100644
--- a/storagedriver/s3/s3.go
+++ b/storagedriver/s3/s3.go
@@ -65,7 +65,7 @@ func FromParameters(parameters map[string]string) (*S3Driver, error) {
 	}
 	region := aws.GetRegion(regionName)
 	if region.Name == "" {
-		return nil, fmt.Errorf("Invalid region provided: %s", region)
+		return nil, fmt.Errorf("Invalid region provided: %v", region)
 	}
 
 	bucket, ok := parameters["bucket"]
@@ -140,7 +140,7 @@ func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadC
 	}
 
 	if (offset) > uint64(len(parts))*chunkSize || (offset < size && offset%chunkSize != 0) {
-		return storagedriver.InvalidOffsetError{path, offset}
+		return storagedriver.InvalidOffsetError{Path: path, Offset: offset}
 	}
 
 	if len(parts) > 0 {
@@ -226,7 +226,9 @@ func (d *S3Driver) List(path string) ([]string, error) {
 func (d *S3Driver) Move(sourcePath string, destPath string) error {
 	/* This is terrible, but aws doesn't have an actual move. */
-	_, err := d.Bucket.PutCopy(destPath, getPermissions(), s3.CopyOptions{d.getOptions(), "", d.getContentType()}, d.Bucket.Name+"/"+sourcePath)
+	_, err := d.Bucket.PutCopy(destPath, getPermissions(),
+		s3.CopyOptions{Options: d.getOptions(), MetadataDirective: "", ContentType: d.getContentType()},
+		d.Bucket.Name+"/"+sourcePath)
 	if err != nil {
 		return err
 	}
@@ -237,7 +239,7 @@ func (d *S3Driver) Move(sourcePath string, destPath string) error {
 func (d *S3Driver) Delete(path string) error {
 	listResponse, err := d.Bucket.List(path, "", "", listPartsMax)
 	if err != nil || len(listResponse.Contents) == 0 {
-		return storagedriver.PathNotFoundError{path}
+		return storagedriver.PathNotFoundError{Path: path}
 	}
 
 	s3Objects := make([]s3.Object, listPartsMax)
@@ -247,7 +249,7 @@ func (d *S3Driver) Delete(path string) error {
 		s3Objects[index].Key = key.Key
 	}
 
-	err := d.Bucket.DelMulti(s3.Delete{false, s3Objects[0:len(listResponse.Contents)]})
+	err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]})
 	if err != nil {
 		return nil
 	}

From 15c651b73216daaef24d0b09641511d04cde7970 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 12 Nov 2014 18:06:54 -0800
Subject: [PATCH 034/165] Simplify repository name component regexp

---
 common/names.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/names.go b/common/names.go
index ce25e487..d856467b 100644
--- a/common/names.go
+++ b/common/names.go
@@ -7,7 +7,7 @@ import (
 // RepositoryNameComponentRegexp restricts registry path component names to
 // start with at least two letters or numbers, with following parts able to
 // be separated by one period, dash or underscore.
-var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9](?:[a-z0-9]+[._-]?)*[a-z0-9]`) +var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]{2,}(?:[._-][a-z0-9]+)*`) // RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 2 to // 5 path components, separated by a forward slash. From 9fd57ab42bb7f8b3ed959703100933b7f37b3b70 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Fri, 7 Nov 2014 15:45:51 -0800 Subject: [PATCH 035/165] Test drone --- .drone.yml | 35 +++++++++++++++++++++++++++++++++++ .travis.yml | 5 ----- README.md | 17 +++++++++++++++++ hack/validate_gofmt.sh | 18 ++++++++++++++++++ 4 files changed, 70 insertions(+), 5 deletions(-) create mode 100644 .drone.yml delete mode 100644 .travis.yml create mode 100644 README.md create mode 100755 hack/validate_gofmt.sh diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 00000000..62352689 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,35 @@ +image: bradrydzewski/go:1.3 + +env: + - GOROOT=/usr/local/go + - PATH=$PATH:$GOROOT/bin:$GOPATH/bin + +script: + - go get code.google.com/p/go.tools/cmd/vet + - go get code.google.com/p/go.tools/cmd/godoc + - go get code.google.com/p/go.tools/cmd/cover + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get github.com/golang/lint/golint + + - go get -d -t ./... + + - hack/validate_gofmt.sh + - goveralls -v -service drone.io -repotoken $COVERALLS_TOKEN + - go vet ./... + - golint ./... + - go test -v ./... + # - go build --tags SOMETAG + +notify: + email: + recipients: + - distribution@docker.com + slack: + team: docker + channel: "#distribution" + username: mom + token: cwX6NE0KKYYQnZJVRaYWGxxJ + on_success: true + on_failure: true + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d48424c3..00000000 --- a/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: -- 1.3 -- tip diff --git a/README.md b/README.md new file mode 100644 index 00000000..0c258a94 --- /dev/null +++ b/README.md @@ -0,0 +1,17 @@ +# docker-registry + +[![Coverage Status][coverage-image]][coverage-url] +[![Build Status][build-image]][build-url] + +## TL;DR + +## License + +[Apache License](https://en.wikipedia.org/wiki/Apache_License) + +[coverage-image]: https://coveralls.io/repos/docker/docker-registry/badge.png +[coverage-url]: https://coveralls.io/r/docker/docker-registry + +[build-image]: https://ci.dockerproject.com/github.com/docker/docker-registry/status.svg?branch=next-generation +[build-url]: https://ci.dockerproject.com/github.com/docker/docker-registry?branch=next-generation + diff --git a/hack/validate_gofmt.sh b/hack/validate_gofmt.sh new file mode 100755 index 00000000..d718ec12 --- /dev/null +++ b/hack/validate_gofmt.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +badFiles=($(find ./ -iname "*.go" -exec gofmt -s -l {} \;)) + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
+ echo + } >&2 + false +fi From 89c6bb2a902ae9c0fe0d582e108b73b16fd50c51 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 13 Nov 2014 14:39:13 -0800 Subject: [PATCH 036/165] Add route test case with dangerous path --- routes_test.go | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/routes_test.go b/routes_test.go index 8907684a..7ca51ae5 100644 --- a/routes_test.go +++ b/routes_test.go @@ -14,6 +14,7 @@ type routeTestCase struct { RequestURI string Vars map[string]string RouteName string + StatusCode int } // TestRouter registers a test handler with all the routes and ensures that @@ -115,6 +116,16 @@ func TestRouter(t *testing.T) { "name": "foo/bar/image", }, }, + { + RouteName: routeNameLayerUploadResume, + RequestURI: "/v2/foo/../../layer/tarsum.dev+foo:abcdef0919234/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "tarsum": "tarsum.dev+foo:abcdef0919234", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + StatusCode: http.StatusNotFound, + }, } { // Register the endpoint router.GetRoute(testcase.RouteName).Handler(testHandler) @@ -126,16 +137,28 @@ func TestRouter(t *testing.T) { t.Fatalf("error issuing get request: %v", err) } - if resp.StatusCode != http.StatusOK { + if testcase.StatusCode == 0 { + // Override default, zero-value + testcase.StatusCode = http.StatusOK + } + + if resp.StatusCode != testcase.StatusCode { t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) } + if testcase.StatusCode != http.StatusOK { + // We don't care about json response. + continue + } + dec := json.NewDecoder(resp.Body) var actualRouteInfo routeTestCase if err := dec.Decode(&actualRouteInfo); err != nil { t.Fatalf("error reading json response: %v", err) } + // Needs to be set out of band + actualRouteInfo.StatusCode = resp.StatusCode if actualRouteInfo.RouteName != testcase.RouteName { t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) From dd47af5feb7906203dc57a240891709be8a858f9 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Thu, 13 Nov 2014 15:40:06 -0800 Subject: [PATCH 037/165] Test --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 62352689..9f54277a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -25,6 +25,7 @@ notify: email: recipients: - distribution@docker.com + slack: team: docker channel: "#distribution" @@ -32,4 +33,3 @@ notify: token: cwX6NE0KKYYQnZJVRaYWGxxJ on_success: true on_failure: true - From 8e44c1d2097c13de3ae929ddeaa77ba7452d0325 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 13 Nov 2014 15:16:54 -0800 Subject: [PATCH 038/165] Initial implementation of storage layer path mapper We've added a path mapper to support simple mapping between path objects used in the storage layer and the underlying file system. The target of this is to ensure that paths are only calculated in a single place and their format is separated from the data that makes up the path components. This commit only includes spec implementation to support layer reads. Further specs will come along with their implementations. 
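The intended mapping is easiest to see by example. The following is a
package-internal sketch (hypothetical; the root and tarsum values are made up
for illustration) of resolving a layer link path with the mapper introduced
below:

	package storage

	import "fmt"

	func ExamplePathMapper() {
		pm := &pathMapper{root: "/registry", version: "v2"}

		// Resolve the storage path for a layer link in a repository.
		p, err := pm.path(layerLinkPathSpec{
			name:   "library/ubuntu",
			tarSum: "tarsum.v1+sha256:abcdef0123456789",
		})
		if err != nil {
			panic(err)
		}

		fmt.Println(p)
		// Output: /registry/v2/repositories/library/ubuntu/layers/tarsum/v1/sha256/abcdef0123456789
	}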
---
 common/names.go       |   4 +
 storage/doc.go        |   3 +
 storage/paths.go      | 179 ++++++++++++++++++++++++++++++++++++++++++
 storage/paths_test.go |  45 +++++++++++
 4 files changed, 231 insertions(+)
 create mode 100644 storage/doc.go
 create mode 100644 storage/paths.go
 create mode 100644 storage/paths_test.go

diff --git a/common/names.go b/common/names.go
index d856467b..c16cbaad 100644
--- a/common/names.go
+++ b/common/names.go
@@ -9,6 +9,10 @@ import (
 // be separated by one period, dash or underscore.
 var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]{2,}(?:[._-][a-z0-9]+)*`)
 
+// TODO(stevvooe): RepositoryName needs to be limited to some fixed length.
+// Looking at path prefixes and the s3 limitation of 1024, this should
+// likely be around 512 bytes. 256 bytes might be more manageable.
+
 // RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 2 to
 // 5 path components, separated by a forward slash.
 var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){1,4}` + RepositoryNameComponentRegexp.String())
diff --git a/storage/doc.go b/storage/doc.go
new file mode 100644
index 00000000..ecc3fc0b
--- /dev/null
+++ b/storage/doc.go
@@ -0,0 +1,3 @@
+// This package contains storage services for use in the registry application.
+// It should be considered an internal package, as of Go 1.4.
+package storage
diff --git a/storage/paths.go b/storage/paths.go
new file mode 100644
index 00000000..76991c1f
--- /dev/null
+++ b/storage/paths.go
@@ -0,0 +1,179 @@
+package storage
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/docker/docker-registry/common"
+)
+
+const storagePathVersion = "v2"
+
+// pathMapper maps paths based on "object names" and their ids. The "object
+// names" mapped by pathMapper are internal to the storage system.
+//
+// The path layout in the storage backend will be roughly as follows:
+//
+//	<root>/v2
+//		-> repositories/
+//			-> <name>/
+//				-> images/
+//					<image id>
+//				-> layers/
+//					-> tarsum/
+//						-> <tarsum version>/
+//							-> <tarsum hash alg>/
+//								<tarsum hash>
+//		-> layerindex/
+//			-> tarsum/
+//				-> <tarsum version>/
+//					-> <tarsum hash alg>/
+//						<tarsum hash>
+//		-> blob/sha256
+//			<split directory content addressable storage>
+//
+// There are a few important components to this path layout. First, we have
+// the repository store identified by name. This contains the image manifests
+// and a layer store with links to CAS blob ids. Outside of the named repo
+// area, we have the layerindex, which provides lookup from tarsum id to repo
+// storage. The blob store contains the actual layer data and any other data
+// that can be referenced by a CAS id.
+//
+// We cover the path formats implemented by this path mapper below.
+//
+//	layerLinkPathSpec:      <root>/v2/repositories/<name>/layers/tarsum/<tarsum version>/<tarsum hash alg>/<tarsum hash>
+//	layerIndexLinkPathSpec: <root>/v2/layerindex/tarsum/<tarsum version>/<tarsum hash alg>/<tarsum hash>
+//	blobPathSpec:           <root>/v2/blob/sha256/<first two hex bytes of digest>/<hex digest>
+//
+// For more information on the semantic meaning of each path and their
+// contents, please see the path spec documentation.
+type pathMapper struct {
+	root    string
+	version string // should be a constant?
+}
+
+// TODO(stevvooe): This storage layout currently allows lookup to layer stores
+// by repo name via the tarsum. The layer index lookup could come with an
+// access control check against the link contents before proceeding. The main
+// problem with this comes with a collision in the tarsum algorithm: if party
+// A uploads a layer before party B, with an identical tarsum, party B may
+// never be able to get access to the tarsum stored under party A. We'll need
+// a way for party B to associate with a "unique" version of their image.
This
+// may be as simple as forcing the client to re-upload images to which they
+// don't have access.
+
+// path returns the path identified by spec.
+func (pm *pathMapper) path(spec pathSpec) (string, error) {
+
+	// Switch on the path object type and return the appropriate path. At
+	// first glance, one may wonder why we don't use an interface to
+	// accomplish this. By keeping the formatting separate from the pathSpec,
+	// we keep the path generation componentized. These specs could be
+	// passed to a completely different mapper implementation and generate a
+	// different set of paths.
+	//
+	// For example, imagine migrating from one backend to the other: one could
+	// build a filesystem walker that converts a string path in one version,
+	// to an intermediate path object, that can be consumed and mapped by the
+	// other version.
+
+	switch v := spec.(type) {
+	case layerLinkPathSpec:
+		tsi, err := common.ParseTarSum(v.tarSum)
+
+		if err != nil {
+			// TODO(sday): This will return an InvalidTarSumError from
+			// ParseTarSum but we may want to wrap this. This error should
+			// never be encountered in production, since the tarsum should be
+			// validated by this point.
+			return "", err
+		}
+
+		p := path.Join(append([]string{pm.root, pm.version, "repositories", v.name, "layers"}, tarSumInfoPathComponents(tsi)...)...)
+
+		return p, nil
+	case layerIndexLinkPathSpec:
+		tsi, err := common.ParseTarSum(v.tarSum)
+
+		if err != nil {
+			// TODO(sday): This will return an InvalidTarSumError from
+			// ParseTarSum but we may want to wrap this. This error should
+			// never be encountered in production, since the tarsum should be
+			// validated by this point.
+			return "", err
+		}
+
+		p := path.Join(append([]string{pm.root, pm.version, "layerindex"}, tarSumInfoPathComponents(tsi)...)...)
+
+		return p, nil
+	case blobPathSpec:
+		p := path.Join([]string{pm.root, pm.version, "blob", v.alg, v.digest[:2], v.digest}...)
+		return p, nil
+	default:
+		// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
+		return "", fmt.Errorf("unknown path spec: %#v", v)
+	}
+}
+
+// pathSpec is a type to mark structs as path specs. There is no
+// implementation because we'd like to keep the specs and the mappers
+// decoupled.
+type pathSpec interface {
+	pathSpec()
+}
+
+// layerLinkPathSpec specifies a path for a layer link, which is a file with a
+// blob id. The layer link will contain a content addressable blob id reference
+// into the blob store. The format of the contents is as follows:
+//
+//	<algorithm>:<hex digest>
+//
+// The following example of the file contents is more illustrative:
+//
+//	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
+//
+// This indicates that there is a blob with the id/digest, calculated via
+// sha256, that can be fetched from the blob store.
+type layerLinkPathSpec struct {
+	name   string
+	tarSum string
+}
+
+func (layerLinkPathSpec) pathSpec() {}
+
+// layerIndexLinkPathSpec provides a path to a registry global layer store,
+// indexed by tarsum. The target file will contain the repo name of the
+// "owner" of the layer. An example name link file follows:
+//
+//	library/ubuntu
+//	foo/bar
+//
+// The above file has the tarsum stored under the foo/bar repository and the
+// library/ubuntu repository. The storage layer should access the tarsum from
+// the first repository to which the client has access.
+type layerIndexLinkPathSpec struct {
+	tarSum string
+}
+
+func (layerIndexLinkPathSpec) pathSpec() {}
+
+// blobPathSpec contains the path for the registry global blob store. For now,
+// this contains layer data, exclusively.
+type blobPathSpec struct {
+	alg    string
+	digest string
+}
+
+func (blobPathSpec) pathSpec() {}
+
+// tarSumInfoPathComponents generates storage path components for the provided
+// TarSumInfo.
+func tarSumInfoPathComponents(tsi common.TarSumInfo) []string {
+	version := tsi.Version
+
+	if version == "" {
+		version = "v0"
+	}
+
+	return []string{"tarsum", version, tsi.Algorithm, tsi.Digest}
+}
diff --git a/storage/paths_test.go b/storage/paths_test.go
new file mode 100644
index 00000000..376966c5
--- /dev/null
+++ b/storage/paths_test.go
@@ -0,0 +1,45 @@
+package storage
+
+import "testing"
+
+func TestPathMapper(t *testing.T) {
+	pm := &pathMapper{
+		root: "/pathmapper-test",
+	}
+
+	for _, testcase := range []struct {
+		spec     pathSpec
+		expected string
+		err      error
+	}{
+		{
+			spec: layerLinkPathSpec{
+				name:   "foo/bar",
+				tarSum: "tarsum.v1+test:abcdef",
+			},
+			expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/abcdef",
+		},
+		{
+			spec: layerIndexLinkPathSpec{
+				tarSum: "tarsum.v1+test:abcdef",
+			},
+			expected: "/pathmapper-test/layerindex/tarsum/v1/test/abcdef",
+		},
+		{
+			spec: blobPathSpec{
+				alg:    "sha512",
+				digest: "abcdefabcdefabcdef908909909",
+			},
+			expected: "/pathmapper-test/blob/sha512/ab/abcdefabcdefabcdef908909909",
+		},
+	} {
+		p, err := pm.path(testcase.spec)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if p != testcase.expected {
+			t.Fatalf("unexpected path generated: %q != %q", p, testcase.expected)
+		}
+	}
+}

From 39fee7d40ad9e91befd2b4f831a4344ff564c25f Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Fri, 14 Nov 2014 15:44:49 -0800
Subject: [PATCH 039/165] Fixes formatting in errors_test.go to match gofmt
 rules

---
 errors_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/errors_test.go b/errors_test.go
index 580754f5..e6ec72f9 100644
--- a/errors_test.go
+++ b/errors_test.go
@@ -8,7 +8,7 @@ import (
 // TestErrorCodes ensures that error code format, mappings and
 // marshaling/unmarshaling round trips are stable.
 func TestErrorCodes(t *testing.T) {
-	for ec, _ := range errorCodeStrings {
+	for ec := range errorCodeStrings {
 		if ec.String() != errorCodeStrings[ec] {
 			t.Fatalf("error code string incorrect: %q != %q", ec.String(), errorCodeStrings[ec])
 		}

From a650f0f85457ff4677494331f461fb8099062361 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 17 Nov 2014 11:28:32 -0800
Subject: [PATCH 040/165] Clarify repository naming constraints for registry
 API

After discussion, it was found that one of the proposed regular expressions
incorrectly limited separator-delimited components to two characters. The
desired restriction is to have repository name components limited to two
characters minimum.

This changeset accomplishes this by wrapping the regular expressions in a
validation function, returning detailed feedback on the validation error.
With this change, the repository name regular expressions are no longer
sufficient to respond with 404s on invalid repo names. Changes to the router
will need to be added to support this.
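A minimal sketch (a hypothetical main package, not part of this patch) of the
detailed feedback the validation function below provides:

	package main

	import (
		"fmt"

		"github.com/docker/docker-registry/common"
	)

	func main() {
		for _, name := range []string{"library/ubuntu", "a/b", "asdf"} {
			if err := common.ValidateRespositoryName(name); err != nil {
				// "a/b" fails the two-character component minimum;
				// "asdf" is missing a second component.
				fmt.Printf("%q rejected: %v\n", name, err)
				continue
			}
			fmt.Printf("%q accepted\n", name)
		}
	}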
---
 common/names.go      | 61 ++++++++++++++++++++++++++++++++++-
 common/names_test.go | 67 +++++++++++++++++++++++++++-------------
 2 files changed, 108 insertions(+), 20 deletions(-)

diff --git a/common/names.go b/common/names.go
index c16cbaad..43f7247a 100644
--- a/common/names.go
+++ b/common/names.go
@@ -1,13 +1,25 @@
 package common
 
 import (
+	"fmt"
 	"regexp"
+	"strings"
+)
+
+const (
+	RepositoryNameComponentMinLength = 2
+	RepositoryNameComponentMaxLength = 30
+
+	RepositoryNameMinComponents = 2
+	RepositoryNameMaxComponents = 5
+	RepositoryNameTotalLengthMax = 255
 )
 
 // RepositoryNameComponentRegexp restricts registry path component names to
 // start with at least two letters or numbers, with following parts able to
 // be separated by one period, dash or underscore.
-var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]{2,}(?:[._-][a-z0-9]+)*`)
+var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`)
+var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`)
 
 // TODO(stevvooe): RepositoryName needs to be limited to some fixed length.
 // Looking at path prefixes and the s3 limitation of 1024, this should
 // likely be around 512 bytes. 256 bytes might be more manageable.
@@ -21,3 +33,50 @@ var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentReg
 var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
 
 // TODO(stevvooe): Contribute these exports back to core, so they are shared.
+
+var (
+	ErrRepositoryNameComponentShort = fmt.Errorf("repository name component must be %v or more characters", RepositoryNameComponentMinLength)
+	ErrRepositoryNameComponentLong  = fmt.Errorf("repository name component must be %v characters or less", RepositoryNameComponentMaxLength)
+
+	ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents)
+	ErrRepositoryNameTooManyComponents = fmt.Errorf("repository name must have %v or fewer components", RepositoryNameMaxComponents)
+
+	ErrRepositoryNameLong             = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
+	ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String())
+)
+
+// ValidateRespositoryName ensures the repository name is valid for use in the
+// registry. This function accepts a superset of what might be accepted by
+// docker core or docker hub. If the name does not pass validation, an error
+// describing the conditions is returned.
+func ValidateRespositoryName(name string) error { + if len(name) > RepositoryNameTotalLengthMax { + return ErrRepositoryNameLong + } + + components := strings.Split(name, "/") + + if len(components) < RepositoryNameMinComponents { + return ErrRepositoryNameMissingComponents + } + + if len(components) > RepositoryNameMaxComponents { + return ErrRepositoryNameTooManyComponents + } + + for _, component := range components { + if len(component) < RepositoryNameComponentMinLength { + return ErrRepositoryNameComponentShort + } + + if len(component) > RepositoryNameComponentMaxLength { + return ErrRepositoryNameComponentLong + } + + if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { + return ErrRepositoryNameComponentInvalid + } + } + + return nil +} diff --git a/common/names_test.go b/common/names_test.go index 17655984..e88257bd 100644 --- a/common/names_test.go +++ b/common/names_test.go @@ -7,56 +7,85 @@ import ( func TestRepositoryNameRegexp(t *testing.T) { for _, testcase := range []struct { input string - valid bool + err error }{ { input: "simple/name", - valid: true, }, { input: "library/ubuntu", - valid: true, }, { input: "docker/stevvooe/app", - valid: true, }, { input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", - valid: true, + err: ErrRepositoryNameTooManyComponents, }, { - input: "a/a/a/a/a/a/b/b/b/b", - valid: false, + input: "aa/aa/bb/bb/bb", + }, + { + input: "a/a/a/b/b", + err: ErrRepositoryNameComponentShort, }, { input: "a/a/a/a/", - valid: false, + err: ErrRepositoryNameComponentShort, }, { input: "foo.com/bar/baz", - valid: true, }, { input: "blog.foo.com/bar/baz", - valid: true, }, { input: "asdf", - valid: false, + err: ErrRepositoryNameMissingComponents, }, { - input: "asdf$$^/", - valid: false, + input: "asdf$$^/aa", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "aa-a/aa", + }, + { + input: "aa/aa", + }, + { + input: "a-a/a-a", + }, + { + input: "a", + err: ErrRepositoryNameMissingComponents, + }, + { + input: "a-/a/a/a", + err: ErrRepositoryNameComponentInvalid, }, } { - if RepositoryNameRegexp.MatchString(testcase.input) != testcase.valid { - status := "invalid" - if testcase.valid { - status = "valid" - } - t.Fatalf("expected %q to be %s repository name", testcase.input, status) + failf := func(format string, v ...interface{}) { + t.Logf(testcase.input+": "+format, v...) + t.Fail() + } + + if err := ValidateRespositoryName(testcase.input); err != testcase.err { + if testcase.err != nil { + if err != nil { + failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) + } else { + failf("expected invalid repository: %v", testcase.err) + } + } else { + if err != nil { + // Wrong error returned. 
+ failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) + } else { + failf("unexpected error validating repository name: %v", err) + } + } } } } From 8ad7819b1bb916fa485186e980fafa60adde3b38 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 17 Nov 2014 13:07:58 -0800 Subject: [PATCH 041/165] Fixes "go vet" for drone CI Removes "go get" commands for go cmd packages to use the default versions Also updates client/client.go to conform to go vet style --- .drone.yml | 5 +---- client/client.go | 50 ++++++++++++++++++++++++++---------------------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/.drone.yml b/.drone.yml index 9f54277a..ce75efc1 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,9 +5,6 @@ env: - PATH=$PATH:$GOROOT/bin:$GOPATH/bin script: - - go get code.google.com/p/go.tools/cmd/vet - - go get code.google.com/p/go.tools/cmd/godoc - - go get code.google.com/p/go.tools/cmd/cover - go get github.com/axw/gocov/gocov - go get github.com/mattn/goveralls - go get github.com/golang/lint/golint @@ -15,7 +12,7 @@ script: - go get -d -t ./... - hack/validate_gofmt.sh - - goveralls -v -service drone.io -repotoken $COVERALLS_TOKEN + - goveralls -v -service drone.io -repotoken {{COVERALLS_TOKEN}} - go vet ./... - golint ./... - go test -v ./... diff --git a/client/client.go b/client/client.go index 1270e256..dcfa8b2d 100644 --- a/client/client.go +++ b/client/client.go @@ -90,7 +90,7 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest case response.StatusCode == http.StatusOK: break case response.StatusCode == http.StatusNotFound: - return nil, ®istry.ImageManifestNotFoundError{name, tag} + return nil, ®istry.ImageManifestNotFoundError{Name: name, Tag: tag} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -100,7 +100,7 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest } return nil, errors default: - return nil, ®istry.UnexpectedHttpStatusError{response.Status} + return nil, ®istry.UnexpectedHttpStatusError{Status: response.Status} } decoder := json.NewDecoder(response.Body) @@ -144,7 +144,7 @@ func (r *clientImpl) PutImageManifest(name, tag string, manifest *registry.Image } return errors default: - return ®istry.UnexpectedHttpStatusError{response.Status} + return ®istry.UnexpectedHttpStatusError{Status: response.Status} } } @@ -166,7 +166,7 @@ func (r *clientImpl) DeleteImage(name, tag string) error { case response.StatusCode == http.StatusNoContent: break case response.StatusCode == http.StatusNotFound: - return ®istry.ImageManifestNotFoundError{name, tag} + return ®istry.ImageManifestNotFoundError{Name: name, Tag: tag} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -176,7 +176,7 @@ func (r *clientImpl) DeleteImage(name, tag string) error { } return errors default: - return ®istry.UnexpectedHttpStatusError{response.Status} + return ®istry.UnexpectedHttpStatusError{Status: response.Status} } return nil @@ -194,7 +194,7 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) { case response.StatusCode == http.StatusOK: break case response.StatusCode == http.StatusNotFound: - return nil, ®istry.RepositoryNotFoundError{name} + return nil, ®istry.RepositoryNotFoundError{Name: name} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := 
json.NewDecoder(response.Body)
@@ -204,7 +204,7 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) {
 		}
 		return nil, errors
 	default:
-		return nil, &registry.UnexpectedHttpStatusError{response.Status}
+		return nil, &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 
 	tags := struct {
@@ -234,7 +234,7 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read
 	}
 
 	if response.StatusCode == http.StatusNotFound {
-		return nil, 0, &registry.LayerNotFoundError{name, tarsum}
+		return nil, 0, &registry.LayerNotFoundError{Name: name, TarSum: tarsum}
 	}
 	// TODO(bbland): handle other status codes, like 5xx errors
 	switch {
@@ -247,7 +247,7 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read
 		return response.Body, int(length), nil
 	case response.StatusCode == http.StatusNotFound:
 		response.Body.Close()
-		return nil, 0, &registry.LayerNotFoundError{name, tarsum}
+		return nil, 0, &registry.LayerNotFoundError{Name: name, TarSum: tarsum}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -258,7 +258,7 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read
 		return nil, 0, errors
 	default:
 		response.Body.Close()
-		return nil, 0, &registry.UnexpectedHttpStatusError{response.Status}
+		return nil, 0, &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }
@@ -280,7 +280,7 @@ func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) {
 	case response.StatusCode == http.StatusAccepted:
 		return response.Header.Get("Location"), nil
 	case response.StatusCode == http.StatusNotModified:
-		return "", &registry.LayerAlreadyExistsError{name, tarsum}
+		return "", &registry.LayerAlreadyExistsError{Name: name, TarSum: tarsum}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -290,7 +290,7 @@ func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) {
 		}
 		return "", errors
 	default:
-		return "", &registry.UnexpectedHttpStatusError{response.Status}
+		return "", &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }
@@ -306,7 +306,7 @@ func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, error) {
 	case response.StatusCode == http.StatusNoContent:
 		return parseRangeHeader(response.Header.Get("Range"))
 	case response.StatusCode == http.StatusNotFound:
-		return 0, 0, &registry.LayerUploadNotFoundError{location}
+		return 0, 0, &registry.LayerUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -316,7 +316,7 @@ func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, error) {
 		}
 		return 0, 0, errors
 	default:
-		return 0, 0, &registry.UnexpectedHttpStatusError{response.Status}
+		return 0, 0, &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }
@@ -348,7 +348,7 @@ func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length in
 	case response.StatusCode == http.StatusCreated:
 		return nil
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.LayerUploadNotFoundError{location}
+		return &registry.LayerUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -358,7 +358,7 @@ func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length in
 		}
 		return
errors
 	default:
-		return &registry.UnexpectedHttpStatusError{response.Status}
+		return &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }
@@ -393,9 +393,13 @@ func (r *clientImpl) UploadLayerChunk(location string, layerChunk io.ReadCloser,
 		if err != nil {
 			return err
 		}
-		return &registry.LayerUploadInvalidRangeError{location, lastValidRange, layerSize}
+		return &registry.LayerUploadInvalidRangeError{
+			Location:       location,
+			LastValidRange: lastValidRange,
+			LayerSize:      layerSize,
+		}
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.LayerUploadNotFoundError{location}
+		return &registry.LayerUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -405,7 +405,7 @@ func (r *clientImpl) UploadLayerChunk(location string, layerChunk io.ReadCloser,
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{response.Status}
+		return &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }
@@ -437,7 +437,7 @@ func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, check
 	case response.StatusCode == http.StatusCreated:
 		return nil
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.LayerUploadNotFoundError{location}
+		return &registry.LayerUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -447,7 +447,7 @@ func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, check
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{response.Status}
+		return &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }
@@ -469,7 +469,7 @@ func (r *clientImpl) CancelLayerUpload(location string) error {
 	case response.StatusCode == http.StatusNoContent:
 		return nil
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.LayerUploadNotFoundError{location}
+		return &registry.LayerUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
 		errors := new(registry.Errors)
 		decoder := json.NewDecoder(response.Body)
@@ -479,7 +479,7 @@ func (r *clientImpl) CancelLayerUpload(location string) error {
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{response.Status}
+		return &registry.UnexpectedHttpStatusError{Status: response.Status}
 	}
 }

From b02ca32ac88bc836b63495e5442216c6dc5251b6 Mon Sep 17 00:00:00 2001
From: Olivier Gambier
Date: Thu, 13 Nov 2014 15:40:06 -0800
Subject: [PATCH 042/165] Fix drone

---
 .drone.yml             | 10 +++++-----
 hack/validate_gofmt.sh | 18 ------------------
 2 files changed, 5 insertions(+), 23 deletions(-)
 delete mode 100755 hack/validate_gofmt.sh

diff --git a/.drone.yml b/.drone.yml
index ce75efc1..f98fe203 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -9,13 +9,13 @@ script:
   - go get github.com/mattn/goveralls
   - go get github.com/golang/lint/golint
 
-  - go get -d -t ./...
+  - go get -t ./...
 
-  - hack/validate_gofmt.sh
-  - goveralls -v -service drone.io -repotoken {{COVERALLS_TOKEN}}
+  - FAIL=$(find ./ -iname "*.go" -exec gofmt -s -l {} \;) && echo "$FAIL" && test -z "$FAIL"
   - go vet ./...
-  - golint ./...
+  - FAIL=$(golint ./...) && echo "$FAIL" && test -z "$FAIL"
   - go test -v ./...
+  - goveralls -v -service drone.io -repotoken {{COVERALLS_TOKEN}}
 
 # - go build --tags SOMETAG
 
 notify:
@@ -27,6 +27,6 @@ notify:
     team: docker
     channel: "#distribution"
     username: mom
-    token: cwX6NE0KKYYQnZJVRaYWGxxJ
+    token: {{SLACK_TOKEN}}
     on_success: true
     on_failure: true
diff --git a/hack/validate_gofmt.sh b/hack/validate_gofmt.sh
deleted file mode 100755
index d718ec12..00000000
--- a/hack/validate_gofmt.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-badFiles=($(find ./ -iname "*.go" -exec gofmt -s -l {} \;))
-
-if [ ${#badFiles[@]} -eq 0 ]; then
-	echo 'Congratulations! All Go source files are properly formatted.'
-else
-	{
-		echo "These files are not properly gofmt'd:"
-		for f in "${badFiles[@]}"; do
-			echo " - $f"
-		done
-		echo
-		echo 'Please reformat the above files using "gofmt -s -w" and commit the result.'
-		echo
-	} >&2
-	false
-fi

From 88795e0a142703ec861a1c416898551e8f0bd4d8 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Mon, 17 Nov 2014 15:44:07 -0800
Subject: [PATCH 043/165] Lots of various golint fixes

Changes some names to match go conventions
Comments all exported methods
Removes dot imports
---
 client/client.go                            |  32 ++--
 common/names.go                             |  41 +++++-
 common/tarsum.go                            |   2 +
 configuration/configuration.go              |  12 +-
 configuration/configuration_test.go         |  28 +--
 errors.go                                   |  54 ++++--
 images.go                                   |   1 +
 storage/doc.go                              |   4 +-
 .../filesystem/{filesystem.go => driver.go} |  57 +++---
 .../{filesystem_test.go => driver_test.go}  |   2 +-
 .../inmemory/{inmemory.go => driver.go}     |  51 +++--
 .../{inmemory_test.go => driver_test.go}    |   7 +-
 storagedriver/ipc/client.go                 |  13 ++
 storagedriver/ipc/ipc.go                    |  30 +--
 storagedriver/ipc/server.go                 |  24 ++-
 storagedriver/s3/s3.go                      |  68 ++++---
 storagedriver/s3/s3_test.go                 |   7 +-
 storagedriver/storagedriver.go              |  63 ++++---
 storagedriver/testsuites/testsuites.go      | 178 ++++++++++--------
 19 files changed, 417 insertions(+), 257 deletions(-)
 rename storagedriver/filesystem/{filesystem.go => driver.go} (64%)
 rename storagedriver/filesystem/{filesystem_test.go => driver_test.go} (90%)
 rename storagedriver/inmemory/{inmemory.go => driver.go} (62%)
 rename storagedriver/inmemory/{inmemory_test.go => driver_test.go} (75%)

diff --git a/client/client.go b/client/client.go
index dcfa8b2d..c4158695 100644
--- a/client/client.go
+++ b/client/client.go
@@ -79,7 +79,7 @@ type clientImpl struct {
 
 // TODO(bbland): use consistent route generation between server and client
 func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest, error) {
-	response, err := http.Get(r.imageManifestUrl(name, tag))
+	response, err := http.Get(r.imageManifestURL(name, tag))
 	if err != nil {
 		return nil, err
 	}
@@ -100,7 +100,7 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest
 		}
 		return nil, errors
 	default:
-		return nil, &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return nil, &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 
 	decoder := json.NewDecoder(response.Body)
@@ -120,7 +120,7 @@ func (r *clientImpl) PutImageManifest(name, tag string, manifest *registry.Image
 	}
 
 	putRequest, err := http.NewRequest("PUT",
-		r.imageManifestUrl(name, tag), bytes.NewReader(manifestBytes))
+		r.imageManifestURL(name, tag), bytes.NewReader(manifestBytes))
 	if err != nil {
 		return err
 	}
@@ -144,13 +144,13 @@ func (r *clientImpl) PutImageManifest(name, tag string, manifest *registry.Image
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
 func (r *clientImpl) DeleteImage(name, tag string) error {
 	deleteRequest, err := http.NewRequest("DELETE",
-		r.imageManifestUrl(name, tag), nil)
+		r.imageManifestURL(name, tag), nil)
 	if err != nil {
 		return err
 	}
@@ -176,7 +176,7 @@ func (r *clientImpl) DeleteImage(name, tag string) error {
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 
 	return nil
@@ -204,7 +204,7 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) {
 		}
 		return nil, errors
 	default:
-		return nil, &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return nil, &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 
 	tags := struct {
@@ -258,7 +258,7 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read
 		return nil, 0, errors
 	default:
 		response.Body.Close()
-		return nil, 0, &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return nil, 0, &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
@@ -290,7 +290,7 @@ func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) {
 		}
 		return "", errors
 	default:
-		return "", &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return "", &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
@@ -316,7 +316,7 @@ func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, error) {
 		}
 		return 0, 0, errors
 	default:
-		return 0, 0, &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return 0, 0, &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
@@ -358,7 +358,7 @@ func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length in
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
@@ -409,7 +409,7 @@ func (r *clientImpl) UploadLayerChunk(location string, layerChunk io.ReadCloser,
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
@@ -451,7 +451,7 @@ func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, check
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
@@ -483,13 +483,13 @@ func (r *clientImpl) CancelLayerUpload(location string) error {
 		}
 		return errors
 	default:
-		return &registry.UnexpectedHttpStatusError{Status: response.Status}
+		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
 
-// imageManifestUrl is a helper method for returning the full url to an image
-// manifest
-func (r *clientImpl) imageManifestUrl(name, tag string) string {
+// imageManifestURL is a helper method for returning the full url to an image
+// manifest
+func (r *clientImpl) imageManifestURL(name, tag string) string {
 	return fmt.Sprintf("%s/v2/%s/image/%s", r.Endpoint, name, tag)
 }
diff --git a/common/names.go b/common/names.go
index 43f7247a..0172c480 100644
--- a/common/names.go
+++ b/common/names.go
@@ -7,11 +7,24 @@ import (
 )
 
 const (
+	// RepositoryNameComponentMinLength is the minimum number of characters in a
+	// single repository name slash-delimited component
 	RepositoryNameComponentMinLength = 2
+
+	// RepositoryNameComponentMaxLength is the maximum number of characters in a
+	// single repository name slash-delimited component
 	RepositoryNameComponentMaxLength = 30
 
-	RepositoryNameMinComponents = 2
-	RepositoryNameMaxComponents = 5
+	// RepositoryNameMinComponents is the minimum number of slash-delimited
+	// components that a repository name must have
+	RepositoryNameMinComponents = 2
+
+	// RepositoryNameMaxComponents is the maximum number of slash-delimited
+	// components that a repository name must have
+	RepositoryNameMaxComponents = 5
+
+	// RepositoryNameTotalLengthMax is the maximum total number of characters in
+	// a repository name
 	RepositoryNameTotalLengthMax = 255
 )
 
@@ -19,6 +32,9 @@ const (
 // start with at least two letters or numbers, with following parts able to
 // separated by one period, dash or underscore.
 var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`)
+
+// RepositoryNameComponentAnchoredRegexp is the version of
+// RepositoryNameComponentRegexp which must completely match the content
 var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`)
 
 // TODO(stevvooe): RepositoryName needs to be limited to some fixed length.
@@ -35,13 +51,30 @@ var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
 
 // TODO(stevvooe): Contribute these exports back to core, so they are shared.
 
 var (
+	// ErrRepositoryNameComponentShort is returned when a repository name
+	// contains a component which is shorter than
+	// RepositoryNameComponentMinLength
 	ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength)
 
-	ErrRepositoryNameComponentLong = fmt.Errorf("respository name component must be %v characters or less", RepositoryNameComponentMaxLength)
+	// ErrRepositoryNameComponentLong is returned when a repository name
+	// contains a component which is longer than
+	// RepositoryNameComponentMaxLength
+	ErrRepositoryNameComponentLong = fmt.Errorf("repository name component must be %v characters or less", RepositoryNameComponentMaxLength)
+
+	// ErrRepositoryNameMissingComponents is returned when a repository name
+	// contains fewer than RepositoryNameMinComponents components
 	ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents)
+
+	// ErrRepositoryNameTooManyComponents is returned when a repository name
+	// contains more than RepositoryNameMaxComponents components
 	ErrRepositoryNameTooManyComponents = fmt.Errorf("repository name %v or less components", RepositoryNameMaxComponents)
 
-	ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
+	// ErrRepositoryNameLong is returned when a repository name is longer than
+	// RepositoryNameTotalLengthMax
+	ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
+
+	// ErrRepositoryNameComponentInvalid is returned when a repository name does
+	// not match RepositoryNameComponentRegexp
 	ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String())
 )
diff --git a/common/tarsum.go b/common/tarsum.go
index 5a6e7d21..a1a56d6d 100644
--- a/common/tarsum.go
+++ b/common/tarsum.go
@@ -25,6 +25,8 @@ type TarSumInfo struct {
 	Digest string
 }
 
+// InvalidTarSumError provides information about a TarSum that cannot be parsed
+// by ParseTarSum.
type InvalidTarSumError struct { TarSum string } diff --git a/configuration/configuration.go b/configuration/configuration.go index c9bb72e3..85d74d95 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -23,9 +23,9 @@ type Configuration struct { Storage Storage `yaml:"storage"` } -// v_0_1_Configuration is a Version 0.1 Configuration struct +// v0_1Configuration is a Version 0.1 Configuration struct // This is currently aliased to Configuration, as it is the current version -type v_0_1_Configuration Configuration +type v0_1Configuration Configuration // Version is a major/minor version pair of the form Major.Minor // Major version upgrades indicate structure or type changes @@ -195,7 +195,7 @@ func Parse(in []byte) (*Configuration, error) { // Parse the remainder of the configuration depending on the provided version switch untypedConfig.Version { case "0.1": - config, err = parseV_0_1_Registry(in) + config, err = parseV0_1Registry(in) if err != nil { return nil, err } @@ -206,11 +206,11 @@ func Parse(in []byte) (*Configuration, error) { return config, nil } -// parseV_0_1_Registry parses a registry Configuration for Version 0.1 -func parseV_0_1_Registry(in []byte) (*Configuration, error) { +// parseV0_1Registry parses a registry Configuration for Version 0.1 +func parseV0_1Registry(in []byte) (*Configuration, error) { envMap := getEnvMap() - var config v_0_1_Configuration + var config v0_1Configuration err := yaml.Unmarshal(in, &config) if err != nil { return nil, err diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index cde679e2..31d15b7a 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -12,7 +12,7 @@ import ( // Hook up gocheck into the "go test" runner func Test(t *testing.T) { TestingT(t) } -// configStruct is a canonical example configuration, which should map to configYamlV_0_1 +// configStruct is a canonical example configuration, which should map to configYamlV0_1 var configStruct = Configuration{ Version: "0.1", Loglevel: "info", @@ -31,8 +31,8 @@ var configStruct = Configuration{ }, } -// configYamlV_0_1 is a Version 0.1 yaml document representing configStruct -var configYamlV_0_1 = ` +// configYamlV0_1 is a Version 0.1 yaml document representing configStruct +var configYamlV0_1 = ` version: 0.1 loglevel: info storage: @@ -48,9 +48,9 @@ storage: port: ~ ` -// inmemoryConfigYamlV_0_1 is a Version 0.1 yaml document specifying an inmemory storage driver with +// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory storage driver with // no parameters -var inmemoryConfigYamlV_0_1 = ` +var inmemoryConfigYamlV0_1 = ` version: 0.1 loglevel: info storage: inmemory @@ -77,9 +77,9 @@ func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } -// TestParseSimple validates that configYamlV_0_1 can be parsed into a struct matching configStruct +// TestParseSimple validates that configYamlV0_1 can be parsed into a struct matching configStruct func (suite *ConfigSuite) TestParseSimple(c *C) { - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -89,7 +89,7 @@ func (suite *ConfigSuite) TestParseSimple(c *C) { func (suite *ConfigSuite) TestParseInmemory(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} - config, err := Parse([]byte(inmemoryConfigYamlV_0_1)) + config, err 
:= Parse([]byte(inmemoryConfigYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -100,7 +100,7 @@ func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { os.Setenv("REGISTRY_STORAGE", "s3") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -117,7 +117,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -129,7 +129,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { os.Setenv("REGISTRY_STORAGE", "inmemory") - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -144,7 +144,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { os.Setenv("REGISTRY_STORAGE", "filesystem") os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -154,7 +154,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { os.Setenv("REGISTRY_LOGLEVEL", "info") - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -166,7 +166,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { os.Setenv("REGISTRY_LOGLEVEL", "error") - config, err := Parse([]byte(configYamlV_0_1)) + config, err := Parse([]byte(configYamlV0_1)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } diff --git a/errors.go b/errors.go index 29cfdd70..abec4965 100644 --- a/errors.go +++ b/errors.go @@ -10,18 +10,43 @@ import ( type ErrorCode int const ( + // ErrorCodeUnknown is a catch-all for errors not defined below. ErrorCodeUnknown ErrorCode = iota // The following errors can happen during a layer upload. + + // ErrorCodeInvalidChecksum is returned when uploading a layer if the + // provided checksum does not match the layer contents. ErrorCodeInvalidChecksum + + // ErrorCodeInvalidLength is returned when uploading a layer if the provided + // length does not match the content length. ErrorCodeInvalidLength + + // ErrorCodeInvalidTarsum is returned when the provided tarsum does not + // match the computed tarsum of the contents. ErrorCodeInvalidTarsum // The following errors can happen during manifest upload. + + // ErrorCodeInvalidName is returned when the name in the manifest does not + // match the provided name. ErrorCodeInvalidName + + // ErrorCodeInvalidTag is returned when the tag in the manifest does not + // match the provided tag. ErrorCodeInvalidTag + + // ErrorCodeUnverifiedManifest is returned when the manifest fails signature + // validation. ErrorCodeUnverifiedManifest + + // ErrorCodeUnknownLayer is returned when the manifest references a + // nonexistent layer. 
 	ErrorCodeUnknownLayer
+
+	// ErrorCodeUntrustedSignature is returned when the manifest is signed by an
+	// untrusted source.
 	ErrorCodeUntrustedSignature
 )
 
@@ -83,6 +108,7 @@ func (ec ErrorCode) String() string {
 	return s
 }
 
+// Message returns the human-readable error message for this error code.
 func (ec ErrorCode) Message() string {
 	m, ok := errorCodesMessages[ec]
 
@@ -93,16 +119,20 @@ func (ec ErrorCode) Message() string {
 	return m
 }
 
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
 func (ec ErrorCode) MarshalText() (text []byte, err error) {
 	return []byte(ec.String()), nil
 }
 
+// UnmarshalText decodes the form generated by MarshalText.
 func (ec *ErrorCode) UnmarshalText(text []byte) error {
 	*ec = stringToErrorCode[string(text)]
 	return nil
 }
 
+// Error provides a wrapper around ErrorCode with extra Details provided.
 type Error struct {
 	Code    ErrorCode   `json:"code,omitempty"`
 	Message string      `json:"message,omitempty"`
@@ -173,7 +203,7 @@ type DetailUnknownLayer struct {
 }
 
 // RepositoryNotFoundError is returned when making an operation against a
-// repository that does not exist in the registry
+// repository that does not exist in the registry.
 type RepositoryNotFoundError struct {
 	Name string
 }
@@ -183,7 +213,7 @@ func (e *RepositoryNotFoundError) Error() string {
 }
 
 // ImageManifestNotFoundError is returned when making an operation against a
-// given image manifest that does not exist in the registry
+// given image manifest that does not exist in the registry.
 type ImageManifestNotFoundError struct {
 	Name string
 	Tag  string
@@ -195,7 +225,7 @@ func (e *ImageManifestNotFoundError) Error() string {
 }
 
 // LayerAlreadyExistsError is returned when attempting to create a new layer
-// that already exists in the registry
+// that already exists in the registry.
 type LayerAlreadyExistsError struct {
 	Name   string
 	TarSum string
@@ -207,7 +237,7 @@ func (e *LayerAlreadyExistsError) Error() string {
 }
 
 // LayerNotFoundError is returned when making an operation against a given image
-// layer that does not exist in the registry
+// layer that does not exist in the registry.
 type LayerNotFoundError struct {
 	Name   string
 	TarSum string
@@ -221,7 +251,7 @@ func (e *LayerNotFoundError) Error() string {
 // LayerUploadNotFoundError is returned when making a layer upload operation
 // against an invalid layer upload location url
 // This may be the result of using a cancelled, completed, or stale upload
-// locationn
+// location.
 type LayerUploadNotFoundError struct {
 	Location string
 }
@@ -232,9 +262,9 @@ func (e *LayerUploadNotFoundError) Error() string {
 }
 
 // LayerUploadInvalidRangeError is returned when attempting to upload an image
-// layer chunk that is out of order
+// layer chunk that is out of order.
 // This provides the known LayerSize and LastValidRange which can be used to
-// resume the upload
+// resume the upload.
 type LayerUploadInvalidRangeError struct {
 	Location       string
 	LastValidRange int
@@ -247,12 +277,12 @@ func (e *LayerUploadInvalidRangeError) Error() string {
 		e.Location, e.LastValidRange, e.LayerSize)
 }
 
-// UnexpectedHttpStatusError is returned when an unexpected http status is
-// returned when making a registry api call
-type UnexpectedHttpStatusError struct {
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry API call.
+type UnexpectedHTTPStatusError struct {
 	Status string
 }
 
-func (e *UnexpectedHttpStatusError) Error() string {
-	return fmt.Sprintf("Received unexpected http status: %s", e.Status)
+func (e *UnexpectedHTTPStatusError) Error() string {
+	return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status)
 }
diff --git a/images.go b/images.go
index 84fe217d..e30c6a5f 100644
--- a/images.go
+++ b/images.go
@@ -36,6 +36,7 @@ type ImageManifest struct {
 // imageManifest is used to avoid recursion in unmarshaling
 type imageManifest ImageManifest
 
+// UnmarshalJSON populates a new ImageManifest struct from JSON data.
 func (m *ImageManifest) UnmarshalJSON(b []byte) error {
 	var manifest imageManifest
 	err := json.Unmarshal(b, &manifest)
diff --git a/storage/doc.go b/storage/doc.go
index ecc3fc0b..387d9234 100644
--- a/storage/doc.go
+++ b/storage/doc.go
@@ -1,3 +1,3 @@
-// This package contains storage services for use in the registry application.
-// It should be considered an internal package, as of Go 1.4.
+// Package storage contains storage services for use in the registry
+// application. It should be considered an internal package, as of Go 1.4.
 package storage
diff --git a/storagedriver/filesystem/filesystem.go b/storagedriver/filesystem/driver.go
similarity index 64%
rename from storagedriver/filesystem/filesystem.go
rename to storagedriver/filesystem/driver.go
index 96c78160..46134259 100644
--- a/storagedriver/filesystem/filesystem.go
+++ b/storagedriver/filesystem/driver.go
@@ -11,11 +11,11 @@ import (
 	"github.com/docker/docker-registry/storagedriver/factory"
 )
 
-const DriverName = "filesystem"
-const DefaultRootDirectory = "/tmp/registry/storage"
+const driverName = "filesystem"
+const defaultRootDirectory = "/tmp/registry/storage"
 
 func init() {
-	factory.Register(DriverName, &filesystemDriverFactory{})
+	factory.Register(driverName, &filesystemDriverFactory{})
 }
 
 // filesystemDriverFactory implements the factory.StorageDriverFactory interface
@@ -25,17 +25,17 @@ func (factory *filesystemDriverFactory) Create(parameters map[string]string) (st
 	return FromParameters(parameters), nil
 }
 
-// FilesystemDriver is a storagedriver.StorageDriver implementation backed by a local filesystem
-// All provided paths will be subpaths of the RootDirectory
-type FilesystemDriver struct {
+// Driver is a storagedriver.StorageDriver implementation backed by a local
+// filesystem. All provided paths will be subpaths of the RootDirectory
+type Driver struct {
 	rootDirectory string
 }
 
-// FromParameters constructs a new FilesystemDriver with a given parameters map
+// FromParameters constructs a new Driver with a given parameters map
 // Optional Parameters:
 // - rootdirectory
-func FromParameters(parameters map[string]string) *FilesystemDriver {
-	var rootDirectory = DefaultRootDirectory
+func FromParameters(parameters map[string]string) *Driver {
+	var rootDirectory = defaultRootDirectory
 	if parameters != nil {
 		rootDir, ok := parameters["rootdirectory"]
 		if ok {
@@ -45,19 +45,20 @@ func FromParameters(parameters map[string]string) *FilesystemDriver {
 	return New(rootDirectory)
 }
 
-// New constructs a new FilesystemDriver with a given rootDirectory
-func New(rootDirectory string) *FilesystemDriver {
-	return &FilesystemDriver{rootDirectory}
+// New constructs a new Driver with a given rootDirectory
+func New(rootDirectory string) *Driver {
+	return &Driver{rootDirectory}
 }
 
-// subPath returns the absolute path of a key within the FilesystemDriver's storage
-func (d *FilesystemDriver) subPath(subPath string) string {
+// subPath returns the absolute path of a key within the Driver's storage
+func (d *Driver) subPath(subPath string) string {
 	return path.Join(d.rootDirectory, subPath)
 }
 
 // Implement the storagedriver.StorageDriver interface
 
-func (d *FilesystemDriver) GetContent(path string) ([]byte, error) {
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *Driver) GetContent(path string) ([]byte, error) {
 	contents, err := ioutil.ReadFile(d.subPath(path))
 	if err != nil {
 		return nil, storagedriver.PathNotFoundError{Path: path}
@@ -65,7 +66,8 @@ func (d *FilesystemDriver) GetContent(path string) ([]byte, error) {
 	return contents, nil
 }
 
-func (d *FilesystemDriver) PutContent(subPath string, contents []byte) error {
+// PutContent stores the []byte content at a location designated by "path".
+func (d *Driver) PutContent(subPath string, contents []byte) error {
 	fullPath := d.subPath(subPath)
 	parentDir := path.Dir(fullPath)
 	err := os.MkdirAll(parentDir, 0755)
@@ -77,7 +79,9 @@ func (d *FilesystemDriver) PutContent(subPath string, contents []byte) error {
 	return err
 }
 
-func (d *FilesystemDriver) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
 	file, err := os.OpenFile(d.subPath(path), os.O_RDONLY, 0644)
 	if err != nil {
 		return nil, err
@@ -95,7 +99,9 @@ func (d *FilesystemDriver) ReadStream(path string, offset uint64) (io.ReadCloser
 	return file, nil
 }
 
-func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, reader io.ReadCloser) error {
+// WriteStream stores the contents of the provided io.ReadCloser at a location
+// designated by the given path.
+func (d *Driver) WriteStream(subPath string, offset, size uint64, reader io.ReadCloser) error {
 	defer reader.Close()
 
 	resumableOffset, err := d.CurrentSize(subPath)
@@ -154,7 +160,9 @@ func (d *FilesystemDriver) WriteStream(subPath string, offset, size uint64, read
 	return err
 }
 
-func (d *FilesystemDriver) CurrentSize(subPath string) (uint64, error) {
+// CurrentSize retrieves the current size in bytes of the object at the given
+// path.
+func (d *Driver) CurrentSize(subPath string) (uint64, error) {
 	fullPath := d.subPath(subPath)
 
 	fileInfo, err := os.Stat(fullPath)
@@ -166,7 +174,9 @@ func (d *FilesystemDriver) CurrentSize(subPath string) (uint64, error) {
 	return uint64(fileInfo.Size()), nil
 }
 
-func (d *FilesystemDriver) List(subPath string) ([]string, error) {
+// List returns a list of the objects that are direct descendants of the given
+// path.
+func (d *Driver) List(subPath string) ([]string, error) {
 	subPath = strings.TrimRight(subPath, "/")
 	fullPath := d.subPath(subPath)
 
@@ -188,12 +198,15 @@ func (d *FilesystemDriver) List(subPath string) ([]string, error) {
 	return keys, nil
 }
 
-func (d *FilesystemDriver) Move(sourcePath string, destPath string) error {
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *Driver) Move(sourcePath string, destPath string) error {
 	err := os.Rename(d.subPath(sourcePath), d.subPath(destPath))
 	return err
 }
 
-func (d *FilesystemDriver) Delete(subPath string) error {
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *Driver) Delete(subPath string) error {
 	fullPath := d.subPath(subPath)
 
 	_, err := os.Stat(fullPath)
diff --git a/storagedriver/filesystem/filesystem_test.go b/storagedriver/filesystem/driver_test.go
similarity index 90%
rename from storagedriver/filesystem/filesystem_test.go
rename to storagedriver/filesystem/driver_test.go
index 7eb4024c..1d9bac54 100644
--- a/storagedriver/filesystem/filesystem_test.go
+++ b/storagedriver/filesystem/driver_test.go
@@ -20,5 +20,5 @@ func init() {
 		return New(rootDirectory), nil
 	}
 	testsuites.RegisterInProcessSuite(filesystemDriverConstructor, testsuites.NeverSkip)
-	testsuites.RegisterIPCSuite(DriverName, map[string]string{"rootdirectory": rootDirectory}, testsuites.NeverSkip)
+	testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": rootDirectory}, testsuites.NeverSkip)
 }
diff --git a/storagedriver/inmemory/inmemory.go b/storagedriver/inmemory/driver.go
similarity index 62%
rename from storagedriver/inmemory/inmemory.go
rename to storagedriver/inmemory/driver.go
index 14590a3a..8685eb25 100644
--- a/storagedriver/inmemory/inmemory.go
+++ b/storagedriver/inmemory/driver.go
@@ -13,34 +13,35 @@ import (
 	"github.com/docker/docker-registry/storagedriver/factory"
 )
 
-const DriverName = "inmemory"
+const driverName = "inmemory"
 
 func init() {
-	factory.Register(DriverName, &inMemoryDriverFactory{})
+	factory.Register(driverName, &inMemoryDriverFactory{})
 }
 
-// inMemoryDriverFacotry implements the factory.StorageDriverFactory interface
+// inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
 type inMemoryDriverFactory struct{}
 
 func (factory *inMemoryDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
 	return New(), nil
 }
 
-// InMemoryDriver is a storagedriver.StorageDriver implementation backed by a local map
-// Intended solely for example and testing purposes
-type InMemoryDriver struct {
+// Driver is a storagedriver.StorageDriver implementation backed by a local map.
+// Intended solely for example and testing purposes.
+type Driver struct {
 	storage map[string][]byte
 	mutex   sync.RWMutex
 }
 
-// New constructs a new InMemoryDriver
-func New() *InMemoryDriver {
-	return &InMemoryDriver{storage: make(map[string][]byte)}
+// New constructs a new Driver.
+func New() *Driver {
+	return &Driver{storage: make(map[string][]byte)}
 }
 
-// Implement the storagedriver.StorageDriver interface
+// Implement the storagedriver.StorageDriver interface.
 
-func (d *InMemoryDriver) GetContent(path string) ([]byte, error) {
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *Driver) GetContent(path string) ([]byte, error) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 	contents, ok := d.storage[path]
@@ -50,14 +51,17 @@ func (d *InMemoryDriver) GetContent(path string) ([]byte, error) {
 	return contents, nil
 }
 
-func (d *InMemoryDriver) PutContent(path string, contents []byte) error {
+// PutContent stores the []byte content at a location designated by "path".
+func (d *Driver) PutContent(path string, contents []byte) error {
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 	d.storage[path] = contents
 	return nil
 }
 
-func (d *InMemoryDriver) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 	contents, err := d.GetContent(path)
@@ -73,7 +77,9 @@ func (d *InMemoryDriver) ReadStream(path string, offset uint64) (io.ReadCloser,
 	return ioutil.NopCloser(bytes.NewReader(buf)), nil
 }
 
-func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
+// WriteStream stores the contents of the provided io.ReadCloser at a location
+// designated by the given path.
+func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
 	defer reader.Close()
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
@@ -100,7 +106,9 @@ func (d *InMemoryDriver) WriteStream(path string, offset, size uint64, reader io
 	return nil
 }
 
-func (d *InMemoryDriver) CurrentSize(path string) (uint64, error) {
+// CurrentSize retrieves the current size in bytes of the object at the given
+// path.
+func (d *Driver) CurrentSize(path string) (uint64, error) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 	contents, ok := d.storage[path]
@@ -110,7 +118,9 @@ func (d *InMemoryDriver) CurrentSize(path string) (uint64, error) {
 	return uint64(len(contents)), nil
 }
 
-func (d *InMemoryDriver) List(path string) ([]string, error) {
+// List returns a list of the objects that are direct descendants of the given
+// path.
+func (d *Driver) List(path string) ([]string, error) {
 	subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s/[^/]+", path))
 	if err != nil {
 		return nil, err
@@ -133,7 +143,9 @@ func (d *InMemoryDriver) List(path string) ([]string, error) {
 	return keys, nil
 }
 
-func (d *InMemoryDriver) Move(sourcePath string, destPath string) error {
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *Driver) Move(sourcePath string, destPath string) error {
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 	contents, ok := d.storage[sourcePath]
@@ -145,10 +157,11 @@ func (d *InMemoryDriver) Move(sourcePath string, destPath string) error {
 	return nil
 }
 
-func (d *InMemoryDriver) Delete(path string) error {
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *Driver) Delete(path string) error {
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
-	subPaths := make([]string, 0)
+	var subPaths []string
 	for k := range d.storage {
 		if strings.HasPrefix(k, path) {
 			subPaths = append(subPaths, k)
diff --git a/storagedriver/inmemory/inmemory_test.go b/storagedriver/inmemory/driver_test.go
similarity index 75%
rename from storagedriver/inmemory/inmemory_test.go
rename to storagedriver/inmemory/driver_test.go
index feea5eab..87549542 100644
--- a/storagedriver/inmemory/inmemory_test.go
+++ b/storagedriver/inmemory/driver_test.go
@@ -5,16 +5,17 @@ import (
 
 	"github.com/docker/docker-registry/storagedriver"
 	"github.com/docker/docker-registry/storagedriver/testsuites"
-	. "gopkg.in/check.v1"
+
+	"gopkg.in/check.v1"
 )
 
 // Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { TestingT(t) }
+func Test(t *testing.T) { check.TestingT(t) }
 
 func init() {
 	inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) {
 		return New(), nil
 	}
 
 	testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip)
-	testsuites.RegisterIPCSuite(DriverName, nil, testsuites.NeverSkip)
+	testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip)
 }
diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go
index 7f41081a..332afe1e 100644
--- a/storagedriver/ipc/client.go
+++ b/storagedriver/ipc/client.go
@@ -173,6 +173,7 @@ func (driver *StorageDriverClient) Stop() error {
 
 // Implement the storagedriver.StorageDriver interface over IPC
 
+// GetContent retrieves the content stored at "path" as a []byte.
 func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) {
 	if err := driver.exited(); err != nil {
 		return nil, err
@@ -204,6 +205,7 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) {
 	return contents, nil
 }
 
+// PutContent stores the []byte content at a location designated by "path".
 func (driver *StorageDriverClient) PutContent(path string, contents []byte) error {
 	if err := driver.exited(); err != nil {
 		return err
@@ -230,6 +232,8 @@ func (driver *StorageDriverClient) PutContent(path string, contents []byte) erro
 	return nil
 }
 
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
 func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
 	if err := driver.exited(); err != nil {
 		return nil, err
@@ -255,6 +259,8 @@ func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.Re
 	return response.Reader, nil
 }
 
+// WriteStream stores the contents of the provided io.ReadCloser at a location
+// designated by the given path.
 func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
 	if err := driver.exited(); err != nil {
 		return err
@@ -280,6 +286,8 @@ func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64,
 	return nil
 }
 
+// CurrentSize retrieves the current size in bytes of the object at the given
+// path.
 func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) {
 	if err := driver.exited(); err != nil {
 		return 0, err
@@ -305,6 +313,8 @@ func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) {
 	return response.Position, nil
 }
 
+// List returns a list of the objects that are direct descendants of the given
+// path.
func (driver *StorageDriverClient) List(path string) ([]string, error) { if err := driver.exited(); err != nil { return nil, err @@ -330,6 +340,8 @@ func (driver *StorageDriverClient) List(path string) ([]string, error) { return response.Keys, nil } +// Move moves an object stored at sourcePath to destPath, removing the original +// object. func (driver *StorageDriverClient) Move(sourcePath string, destPath string) error { if err := driver.exited(); err != nil { return err @@ -355,6 +367,7 @@ func (driver *StorageDriverClient) Move(sourcePath string, destPath string) erro return nil } +// Delete recursively deletes all objects stored at "path" and its subpaths. func (driver *StorageDriverClient) Delete(path string) error { if err := driver.exited(); err != nil { return err diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go index 00baee1c..898d10bf 100644 --- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -9,10 +9,10 @@ import ( "github.com/docker/libchan" ) -// IPCStorageDriver is the interface which IPC storage drivers must implement. As external storage +// StorageDriver is the interface which IPC storage drivers must implement. As external storage // drivers may be defined to use a different version of the storagedriver.StorageDriver interface, // we use an additional version check to determine compatiblity. -type IPCStorageDriver interface { +type StorageDriver interface { // Version returns the storagedriver.StorageDriver interface version which this storage driver // implements, which is used to determine driver compatibility Version() (storagedriver.Version, error) @@ -36,23 +36,25 @@ type Request struct { ResponseChannel libchan.Sender } -type responseError struct { +// ResponseError is a serializable error type. +type ResponseError struct { Type string Message string } -// ResponseError wraps an error in a serializable struct containing the error's type and message -func ResponseError(err error) *responseError { +// WrapError wraps an error in a serializable struct containing the error's type +// and message. 
+func WrapError(err error) *ResponseError { if err == nil { return nil } - return &responseError{ + return &ResponseError{ Type: reflect.TypeOf(err).String(), Message: err.Error(), } } -func (err *responseError) Error() string { +func (err *ResponseError) Error() string { return fmt.Sprintf("%s: %s", err.Type, err.Message) } @@ -61,38 +63,38 @@ func (err *responseError) Error() string { // VersionResponse is a response for a Version request type VersionResponse struct { Version storagedriver.Version - Error *responseError + Error *ResponseError } // ReadStreamResponse is a response for a ReadStream request type ReadStreamResponse struct { Reader io.ReadCloser - Error *responseError + Error *ResponseError } // WriteStreamResponse is a response for a WriteStream request type WriteStreamResponse struct { - Error *responseError + Error *ResponseError } // CurrentSizeResponse is a response for a CurrentSize request type CurrentSizeResponse struct { Position uint64 - Error *responseError + Error *ResponseError } // ListResponse is a response for a List request type ListResponse struct { Keys []string - Error *responseError + Error *ResponseError } // MoveResponse is a response for a Move request type MoveResponse struct { - Error *responseError + Error *ResponseError } // DeleteResponse is a response for a Delete request type DeleteResponse struct { - Error *responseError + Error *ResponseError } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index f374bf06..71422f93 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -33,7 +33,9 @@ func StorageDriverServer(driver storagedriver.StorageDriver) error { } else { for { receiver, err := transport.WaitReceiveChannel() - if err != nil { + if err == io.EOF { + return nil + } else if err != nil { panic(err) } go receive(driver, receiver) @@ -49,7 +51,9 @@ func receive(driver storagedriver.StorageDriver, receiver libchan.Receiver) { for { var request Request err := receiver.Receive(&request) - if err != nil { + if err == io.EOF { + return + } else if err != nil { panic(err) } go handleRequest(driver, request) @@ -70,7 +74,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { content, err := driver.GetContent(path) var response ReadStreamResponse if err != nil { - response = ReadStreamResponse{Error: ResponseError(err)} + response = ReadStreamResponse{Error: WrapError(err)} } else { response = ReadStreamResponse{Reader: ioutil.NopCloser(bytes.NewReader(content))} } @@ -87,7 +91,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { err = driver.PutContent(path, contents) } response := WriteStreamResponse{ - Error: ResponseError(err), + Error: WrapError(err), } err = request.ResponseChannel.Send(&response) if err != nil { @@ -100,7 +104,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { reader, err := driver.ReadStream(path, offset) var response ReadStreamResponse if err != nil { - response = ReadStreamResponse{Error: ResponseError(err)} + response = ReadStreamResponse{Error: WrapError(err)} } else { response = ReadStreamResponse{Reader: ioutil.NopCloser(reader)} } @@ -117,7 +121,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { reader, _ := request.Parameters["Reader"].(io.ReadCloser) err := driver.WriteStream(path, offset, size, reader) response := WriteStreamResponse{ - Error: ResponseError(err), + Error: WrapError(err), } err = request.ResponseChannel.Send(&response) if err != nil { @@ -128,7 +132,7 
@@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		position, err := driver.CurrentSize(path)
 		response := CurrentSizeResponse{
 			Position: position,
-			Error:    ResponseError(err),
+			Error:    WrapError(err),
 		}
 		err = request.ResponseChannel.Send(&response)
 		if err != nil {
@@ -139,7 +143,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		keys, err := driver.List(path)
 		response := ListResponse{
 			Keys:  keys,
-			Error: ResponseError(err),
+			Error: WrapError(err),
 		}
 		err = request.ResponseChannel.Send(&response)
 		if err != nil {
@@ -150,7 +154,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		destPath, _ := request.Parameters["DestPath"].(string)
 		err := driver.Move(sourcePath, destPath)
 		response := MoveResponse{
-			Error: ResponseError(err),
+			Error: WrapError(err),
 		}
 		err = request.ResponseChannel.Send(&response)
 		if err != nil {
@@ -160,7 +164,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
 		path, _ := request.Parameters["Path"].(string)
 		err := driver.Delete(path)
 		response := DeleteResponse{
-			Error: ResponseError(err),
+			Error: WrapError(err),
 		}
 		err = request.ResponseChannel.Send(&response)
 		if err != nil {
diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go
index 7ef7df6b..82071b2e 100644
--- a/storagedriver/s3/s3.go
+++ b/storagedriver/s3/s3.go
@@ -13,7 +13,7 @@ import (
 	"github.com/docker/docker-registry/storagedriver/factory"
 )
 
-const DriverName = "s3"
+const driverName = "s3"
 
 // minChunkSize defines the minimum multipart upload chunk size
 // S3 API requires multipart upload chunks to be at least 5MB
@@ -23,7 +23,7 @@ const minChunkSize = uint64(5 * 1024 * 1024)
 const listPartsMax = 1000
 
 func init() {
-	factory.Register(DriverName, &s3DriverFactory{})
+	factory.Register(driverName, &s3DriverFactory{})
 }
 
 // s3DriverFactory implements the factory.StorageDriverFactory interface
@@ -33,22 +33,22 @@ func (factory *s3DriverFactory) Create(parameters map[string]string) (storagedri
 	return FromParameters(parameters)
 }
 
-// S3Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
-// Objects are stored at absolute keys in the provided bucket
-type S3Driver struct {
+// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
+// Objects are stored at absolute keys in the provided bucket
+type Driver struct {
 	S3      *s3.S3
 	Bucket  *s3.Bucket
 	Encrypt bool
 }
 
-// FromParameters constructs a new S3Driver with a given parameters map
+// FromParameters constructs a new Driver with a given parameters map
 // Required parameters:
 // - accesskey
 // - secretkey
 // - region
 // - bucket
 // - encrypt
-func FromParameters(parameters map[string]string) (*S3Driver, error) {
+func FromParameters(parameters map[string]string) (*Driver, error) {
 	accessKey, ok := parameters["accesskey"]
 	if !ok || accessKey == "" {
 		return nil, fmt.Errorf("No accesskey parameter provided")
@@ -85,9 +85,9 @@ func FromParameters(parameters map[string]string) (*S3Driver, error) {
 	return New(accessKey, secretKey, region, encryptBool, bucket)
 }
 
-// New constructs a new S3Driver with the given AWS credentials, region, encryption flag, and
+// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
 // bucketName
-func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*S3Driver, error) {
+func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bucketName string) (*Driver, error) {
 	auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}
 	s3obj := s3.New(auth, region)
 	bucket := s3obj.Bucket(bucketName)
@@ -99,20 +99,24 @@ func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bu
 		}
 	}
 
-	return &S3Driver{s3obj, bucket, encrypt}, nil
+	return &Driver{s3obj, bucket, encrypt}, nil
 }
 
 // Implement the storagedriver.StorageDriver interface
 
-func (d *S3Driver) GetContent(path string) ([]byte, error) {
+// GetContent retrieves the content stored at "path" as a []byte.
+func (d *Driver) GetContent(path string) ([]byte, error) {
 	return d.Bucket.Get(path)
 }
 
-func (d *S3Driver) PutContent(path string, contents []byte) error {
+// PutContent stores the []byte content at a location designated by "path".
+func (d *Driver) PutContent(path string, contents []byte) error {
 	return d.Bucket.Put(path, contents, d.getContentType(), getPermissions(), d.getOptions())
 }
 
-func (d *S3Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
+// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
+// given byte offset.
+func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) {
 	headers := make(http.Header)
 	headers.Add("Range", "bytes="+strconv.FormatUint(offset, 10)+"-")
 
@@ -124,7 +128,7 @@ func (d *S3Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error)
 	return nil, err
 }
 
-func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
+// WriteStream stores the contents of the provided io.ReadCloser at a location
+// designated by the given path.
+func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error {
 	defer reader.Close()
 
 	chunkSize := minChunkSize
@@ -177,7 +183,9 @@ func (d *S3Driver) WriteStream(path string, offset, size uint64, reader io.ReadC
 	return nil
 }
 
-func (d *S3Driver) CurrentSize(path string) (uint64, error) {
+// CurrentSize retrieves the current size in bytes of the object at the given
+// path.
+func (d *Driver) CurrentSize(path string) (uint64, error) {
 	_, parts, err := d.getAllParts(path)
 	if err != nil {
 		return 0, err
@@ -190,7 +198,9 @@ func (d *S3Driver) CurrentSize(path string) (uint64, error) {
 	return (((uint64(len(parts)) - 1) * uint64(parts[0].Size)) + uint64(parts[len(parts)-1].Size)), nil
 }
 
-func (d *S3Driver) List(path string) ([]string, error) {
+// List returns a list of the objects that are direct descendants of the given
+// path.
+func (d *Driver) List(path string) ([]string, error) {
 	if path[len(path)-1] != '/' {
 		path = path + "/"
 	}
@@ -224,7 +234,9 @@ func (d *S3Driver) List(path string) ([]string, error) {
 	return append(files, directories...), nil
 }
 
-func (d *S3Driver) Move(sourcePath string, destPath string) error {
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *Driver) Move(sourcePath string, destPath string) error {
 	/* This is terrible, but aws doesn't have an actual move. */
 	_, err := d.Bucket.PutCopy(destPath, getPermissions(),
 		s3.CopyOptions{Options: d.getOptions(), MetadataDirective: "", ContentType: d.getContentType()},
@@ -236,7 +248,8 @@ func (d *S3Driver) Move(sourcePath string, destPath string) error {
 	return d.Delete(sourcePath)
 }
 
-func (d *S3Driver) Delete(path string) error {
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *Driver) Delete(path string) error { listResponse, err := d.Bucket.List(path, "", "", listPartsMax) if err != nil || len(listResponse.Contents) == 0 { return storagedriver.PathNotFoundError{Path: path} @@ -263,30 +276,29 @@ func (d *S3Driver) Delete(path string) error { return nil } -func (d *S3Driver) getHighestIdMulti(path string) (multi *s3.Multi, err error) { +func (d *Driver) getHighestIDMulti(path string) (multi *s3.Multi, err error) { multis, _, err := d.Bucket.ListMulti(path, "") if err != nil && !hasCode(err, "NoSuchUpload") { return nil, err } - uploadId := "" + uploadID := "" if len(multis) > 0 { for _, m := range multis { - if m.Key == path && m.UploadId >= uploadId { - uploadId = m.UploadId + if m.Key == path && m.UploadId >= uploadID { + uploadID = m.UploadId multi = m } } return multi, nil - } else { - multi, err := d.Bucket.InitMulti(path, d.getContentType(), getPermissions(), d.getOptions()) - return multi, err } + multi, err = d.Bucket.InitMulti(path, d.getContentType(), getPermissions(), d.getOptions()) + return multi, err } -func (d *S3Driver) getAllParts(path string) (*s3.Multi, []s3.Part, error) { - multi, err := d.getHighestIdMulti(path) +func (d *Driver) getAllParts(path string) (*s3.Multi, []s3.Part, error) { + multi, err := d.getHighestIDMulti(path) if err != nil { return nil, nil, err } @@ -300,7 +312,7 @@ func hasCode(err error, code string) bool { return ok && s3err.Code == code } -func (d *S3Driver) getOptions() s3.Options { +func (d *Driver) getOptions() s3.Options { return s3.Options{SSE: d.Encrypt} } @@ -308,6 +320,6 @@ func getPermissions() s3.ACL { return s3.Private } -func (d *S3Driver) getContentType() string { +func (d *Driver) getContentType() string { return "application/octet-stream" } diff --git a/storagedriver/s3/s3_test.go b/storagedriver/s3/s3_test.go index 576c3623..6d7b3ff7 100644 --- a/storagedriver/s3/s3_test.go +++ b/storagedriver/s3/s3_test.go @@ -8,11 +8,12 @@ import ( "github.com/crowdmob/goamz/aws" "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/testsuites" - . "gopkg.in/check.v1" + + "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { TestingT(t) } +func Test(t *testing.T) { check.TestingT(t) } func init() { accessKey := os.Getenv("AWS_ACCESS_KEY") @@ -38,7 +39,7 @@ func init() { } testsuites.RegisterInProcessSuite(s3DriverConstructor, skipCheck) - testsuites.RegisterIPCSuite(DriverName, map[string]string{ + testsuites.RegisterIPCSuite(driverName, map[string]string{ "accesskey": accessKey, "secretkey": secretKey, "region": region, diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index fcf691c6..1b6c5c00 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -7,67 +7,73 @@ import ( "strings" ) -// Version is a string representing the storage driver version, of the form Major.Minor. -// The registry must accept storage drivers with equal major version and greater minor version, -// but may not be compatible with older storage driver versions. +// Version is a string representing the storage driver version, of the form +// Major.Minor. +// The registry must accept storage drivers with equal major version and greater +// minor version, but may not be compatible with older storage driver versions. type Version string -// Major returns the major (primary) component of a version +// Major returns the major (primary) component of a version. 
 func (version Version) Major() uint {
 	majorPart := strings.Split(string(version), ".")[0]
 	major, _ := strconv.ParseUint(majorPart, 10, 0)
 	return uint(major)
 }
 
-// Minor returns the minor (secondary) component of a version
+// Minor returns the minor (secondary) component of a version.
 func (version Version) Minor() uint {
 	minorPart := strings.Split(string(version), ".")[1]
 	minor, _ := strconv.ParseUint(minorPart, 10, 0)
 	return uint(minor)
 }
 
-// CurrentVersion is the current storage driver Version
+// CurrentVersion is the current storage driver Version.
 const CurrentVersion Version = "0.1"
 
-// StorageDriver defines methods that a Storage Driver must implement for a filesystem-like
-// key/value object storage
+// StorageDriver defines methods that a Storage Driver must implement for a
+// filesystem-like key/value object storage.
 type StorageDriver interface {
-	// GetContent retrieves the content stored at "path" as a []byte
-	// Should primarily be used for small objects
+	// GetContent retrieves the content stored at "path" as a []byte.
+	// This should primarily be used for small objects.
 	GetContent(path string) ([]byte, error)
 
-	// PutContent stores the []byte content at a location designated by "path"
-	// Should primarily be used for small objects
+	// PutContent stores the []byte content at a location designated by "path".
+	// This should primarily be used for small objects.
 	PutContent(path string, content []byte) error
 
-	// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a given byte
-	// offset
-	// May be used to resume reading a stream by providing a nonzero offset
+	// ReadStream retrieves an io.ReadCloser for the content stored at "path"
+	// with a given byte offset.
+	// May be used to resume reading a stream by providing a nonzero offset.
 	ReadStream(path string, offset uint64) (io.ReadCloser, error)
 
-	// WriteStream stores the contents of the provided io.ReadCloser at a location designated by
-	// the given path
-	// The driver will know it has received the full contents when it has read "size" bytes
-	// May be used to resume writing a stream by providing a nonzero offset
-	// The offset must be no larger than the CurrentSize for this path
+	// WriteStream stores the contents of the provided io.ReadCloser at a
+	// location designated by the given path.
+	// The driver will know it has received the full contents when it has read
+	// "size" bytes.
+	// May be used to resume writing a stream by providing a nonzero offset.
+	// The offset must be no larger than the CurrentSize for this path.
 	WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error
 
-	// CurrentSize retrieves the curernt size in bytes of the object at the given path
-	// It should be safe to read or write anywhere up to this point
+	// CurrentSize retrieves the current size in bytes of the object at the
+	// given path.
+	// It should be safe to read or write anywhere up to this point.
 	CurrentSize(path string) (uint64, error)
 
-	// List returns a list of the objects that are direct descendants of the given path
+	// List returns a list of the objects that are direct descendants of the
+	// given path.
 	List(path string) ([]string, error)
 
-	// Move moves an object stored at sourcePath to destPath, removing the original object
-	// Note: This may be no more efficient than a copy followed by a delete for many implementations
+	// Move moves an object stored at sourcePath to destPath, removing the
+	// original object.
+ // Note: This may be no more efficient than a copy followed by a delete for + // many implementations. Move(sourcePath string, destPath string) error - // Delete recursively deletes all objects stored at "path" and its subpaths + // Delete recursively deletes all objects stored at "path" and its subpaths. Delete(path string) error } -// PathNotFoundError is returned when operating on a nonexistent path +// PathNotFoundError is returned when operating on a nonexistent path. type PathNotFoundError struct { Path string } @@ -76,7 +82,8 @@ func (err PathNotFoundError) Error() string { return fmt.Sprintf("Path not found: %s", err.Path) } -// InvalidOffsetError is returned when attempting to read or write from an invalid offset +// InvalidOffsetError is returned when attempting to read or write from an +// invalid offset. type InvalidOffsetError struct { Path string Offset uint64 diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index d2859913..217237f7 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -11,15 +11,15 @@ import ( "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/ipc" - . "gopkg.in/check.v1" + "gopkg.in/check.v1" ) -// Hook up gocheck into the "go test" runner -func Test(t *testing.T) { TestingT(t) } +// Test hooks up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } // RegisterInProcessSuite registers an in-process storage driver test suite with the go test runner func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { - Suite(&DriverSuite{ + check.Suite(&DriverSuite{ Constructor: driverConstructor, SkipCheck: skipCheck, }) @@ -50,7 +50,7 @@ func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck driverClient := suite.StorageDriver.(*ipc.StorageDriverClient) return driverClient.Stop() } - Suite(suite) + check.Suite(suite) } // SkipCheck is a function used to determine if a test suite should be skipped @@ -75,77 +75,93 @@ type DriverSuite struct { storagedriver.StorageDriver } -func (suite *DriverSuite) SetUpSuite(c *C) { +// SetUpSuite sets up the gocheck test suite +func (suite *DriverSuite) SetUpSuite(c *check.C) { if reason := suite.SkipCheck(); reason != "" { c.Skip(reason) } d, err := suite.Constructor() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) suite.StorageDriver = d } -func (suite *DriverSuite) TearDownSuite(c *C) { +// TearDownSuite tears down the gocheck test suite +func (suite *DriverSuite) TearDownSuite(c *check.C) { if suite.Teardown != nil { err := suite.Teardown() - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } } -func (suite *DriverSuite) TestWriteRead1(c *C) { +// TestWriteRead1 tests a simple write-read workflow +func (suite *DriverSuite) TestWriteRead1(c *check.C) { filename := randomString(32) contents := []byte("a") suite.writeReadCompare(c, filename, contents, contents) } -func (suite *DriverSuite) TestWriteRead2(c *C) { +// TestWriteRead2 tests a simple write-read workflow with unicode data +func (suite *DriverSuite) TestWriteRead2(c *check.C) { filename := randomString(32) contents := []byte("\xc3\x9f") suite.writeReadCompare(c, filename, contents, contents) } -func (suite *DriverSuite) TestWriteRead3(c *C) { +// TestWriteRead3 tests a simple write-read workflow with a small string +func (suite *DriverSuite) TestWriteRead3(c *check.C) { filename := randomString(32) contents := []byte(randomString(32)) 
suite.writeReadCompare(c, filename, contents, contents) } -func (suite *DriverSuite) TestWriteRead4(c *C) { +// TestWriteRead4 tests a simple write-read workflow with 1MB of data +func (suite *DriverSuite) TestWriteRead4(c *check.C) { filename := randomString(32) contents := []byte(randomString(1024 * 1024)) suite.writeReadCompare(c, filename, contents, contents) } -func (suite *DriverSuite) TestReadNonexistent(c *C) { +// TestReadNonexistent tests reading content from an empty path +func (suite *DriverSuite) TestReadNonexistent(c *check.C) { filename := randomString(32) _, err := suite.StorageDriver.GetContent(filename) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) TestWriteReadStreams1(c *C) { +// TestWriteReadStreams1 tests a simple write-read streaming workflow +func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { filename := randomString(32) contents := []byte("a") suite.writeReadCompareStreams(c, filename, contents, contents) } -func (suite *DriverSuite) TestWriteReadStreams2(c *C) { +// TestWriteReadStreams2 tests a simple write-read streaming workflow with +// unicode data +func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { filename := randomString(32) contents := []byte("\xc3\x9f") suite.writeReadCompareStreams(c, filename, contents, contents) } -func (suite *DriverSuite) TestWriteReadStreams3(c *C) { +// TestWriteReadStreams3 tests a simple write-read streaming workflow with a +// small amount of data +func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { filename := randomString(32) contents := []byte(randomString(32)) suite.writeReadCompareStreams(c, filename, contents, contents) } -func (suite *DriverSuite) TestWriteReadStreams4(c *C) { +// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB +// of data +func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { filename := randomString(32) contents := []byte(randomString(1024 * 1024)) suite.writeReadCompareStreams(c, filename, contents, contents) } -func (suite *DriverSuite) TestContinueStreamAppend(c *C) { +// TestContinueStreamAppend tests that a stream write can be appended to without +// corrupting the data +func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) @@ -158,31 +174,33 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *C) { fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) 
err := suite.StorageDriver.WriteStream(filename, 0, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(contentsChunk1))) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) offset, err := suite.StorageDriver.CurrentSize(filename) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if offset > chunkSize { c.Fatalf("Offset too large, %d > %d", offset, chunkSize) } err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize]))) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) offset, err = suite.StorageDriver.CurrentSize(filename) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) if offset > 2*chunkSize { c.Fatalf("Offset too large, %d > %d", offset, 2*chunkSize) } err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:]))) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(filename) - c.Assert(err, IsNil) - c.Assert(received, DeepEquals, fullContents) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, fullContents) } -func (suite *DriverSuite) TestReadStreamWithOffset(c *C) { +// TestReadStreamWithOffset tests that the appropriate data is streamed when +// reading with a given offset +func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) @@ -193,43 +211,46 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *C) { contentsChunk3 := []byte(randomString(chunkSize)) err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) reader, err := suite.StorageDriver.ReadStream(filename, 0) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer reader.Close() readContents, err := ioutil.ReadAll(reader) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(readContents, DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) + c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) reader, err = suite.StorageDriver.ReadStream(filename, chunkSize) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer reader.Close() readContents, err = ioutil.ReadAll(reader) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(readContents, DeepEquals, append(contentsChunk2, contentsChunk3...)) + c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*2) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer reader.Close() readContents, err = ioutil.ReadAll(reader) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(readContents, DeepEquals, contentsChunk3) + c.Assert(readContents, check.DeepEquals, contentsChunk3) } -func (suite *DriverSuite) TestReadNonexistentStream(c *C) { +// TestReadNonexistentStream tests that reading a stream for a nonexistent path +// fails +func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomString(32) _, err := suite.StorageDriver.ReadStream(filename, 0) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) TestList(c *C) { +// TestList checks the returned list of keys after populating a directory tree +func (suite *DriverSuite) TestList(c *check.C) { rootDirectory := randomString(uint64(8 + rand.Intn(8))) defer 
suite.StorageDriver.Delete(rootDirectory) @@ -239,22 +260,24 @@ func (suite *DriverSuite) TestList(c *C) { childFile := parentDirectory + "/" + randomString(uint64(8+rand.Intn(8))) childFiles[i] = childFile err := suite.StorageDriver.PutContent(childFile, []byte(randomString(32))) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) } sort.Strings(childFiles) keys, err := suite.StorageDriver.List(rootDirectory) - c.Assert(err, IsNil) - c.Assert(keys, DeepEquals, []string{parentDirectory}) + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{parentDirectory}) keys, err = suite.StorageDriver.List(parentDirectory) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) sort.Strings(keys) - c.Assert(keys, DeepEquals, childFiles) + c.Assert(keys, check.DeepEquals, childFiles) } -func (suite *DriverSuite) TestMove(c *C) { +// TestMove checks that a moved object no longer exists at the source path and +// does exist at the destination +func (suite *DriverSuite) TestMove(c *check.C) { contents := []byte(randomString(32)) sourcePath := randomString(32) destPath := randomString(32) @@ -263,50 +286,55 @@ func (suite *DriverSuite) TestMove(c *C) { defer suite.StorageDriver.Delete(destPath) err := suite.StorageDriver.PutContent(sourcePath, contents) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = suite.StorageDriver.Move(sourcePath, destPath) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(destPath) - c.Assert(err, IsNil) - c.Assert(received, DeepEquals, contents) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) _, err = suite.StorageDriver.GetContent(sourcePath) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) TestMoveNonexistent(c *C) { +// TestMoveNonexistent checks that moving a nonexistent key fails +func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { sourcePath := randomString(32) destPath := randomString(32) err := suite.StorageDriver.Move(sourcePath, destPath) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) TestRemove(c *C) { +// TestDelete checks that the delete operation removes data from the storage +// driver +func (suite *DriverSuite) TestDelete(c *check.C) { filename := randomString(32) contents := []byte(randomString(32)) defer suite.StorageDriver.Delete(filename) err := suite.StorageDriver.PutContent(filename, contents) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = suite.StorageDriver.Delete(filename) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(filename) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) TestRemoveNonexistent(c *C) { +// TestDeleteNonexistent checks that removing a nonexistent key fails +func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { filename := randomString(32) err := suite.StorageDriver.Delete(filename) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) TestRemoveFolder(c *C) { +// TestDeleteFolder checks that deleting a folder removes all child elements +func (suite *DriverSuite) TestDeleteFolder(c *check.C) { dirname := randomString(32) filename1 := randomString(32) filename2 := randomString(32) @@ -316,47 +344,47 @@ func (suite *DriverSuite) TestRemoveFolder(c *C) { defer suite.StorageDriver.Delete(path.Join(dirname, filename2)) err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents) - c.Assert(err, IsNil) + c.Assert(err, 
check.IsNil) err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) err = suite.StorageDriver.Delete(dirname) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1)) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2)) - c.Assert(err, NotNil) + c.Assert(err, check.NotNil) } -func (suite *DriverSuite) writeReadCompare(c *C, filename string, contents, expected []byte) { +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents, expected []byte) { defer suite.StorageDriver.Delete(filename) err := suite.StorageDriver.PutContent(filename, contents) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) readContents, err := suite.StorageDriver.GetContent(filename) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(readContents, DeepEquals, contents) + c.Assert(readContents, check.DeepEquals, contents) } -func (suite *DriverSuite) writeReadCompareStreams(c *C, filename string, contents, expected []byte) { +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents, expected []byte) { defer suite.StorageDriver.Delete(filename) err := suite.StorageDriver.WriteStream(filename, 0, uint64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents))) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) reader, err := suite.StorageDriver.ReadStream(filename, 0) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) defer reader.Close() readContents, err := ioutil.ReadAll(reader) - c.Assert(err, IsNil) + c.Assert(err, check.IsNil) - c.Assert(readContents, DeepEquals, contents) + c.Assert(readContents, check.DeepEquals, contents) } var pathChars = []byte("abcdefghijklmnopqrstuvwxyz") From 0e1b1cc04e9f97de88995a601688fb9f2f8e21a2 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 13 Nov 2014 18:42:49 -0800 Subject: [PATCH 044/165] Adds push/pull client functionality These methods rely on an ObjectStore interface, which is meant to approximate the storage behavior of the docker engine. This is very much subject to change. 
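As a rough illustration of the intended workflow, here is a minimal sketch (not part of the patch) of wiring a manifest into the in-memory ObjectStore added below in client/objectstore.go and pushing it, written as if inside the client package the way client_test.go is; the endpoint, name, tag, and manifest values are placeholders:

package client

import (
	"sync"

	"github.com/docker/docker-registry"
)

// pushSketch seeds an in-memory ObjectStore with a manifest and pushes the
// named image. Layer contents would be written through Layer.Writer() first,
// as the tests below demonstrate.
func pushSketch(endpoint, name, tag string, manifest *registry.ImageManifest) error {
	objectStore := &memoryObjectStore{
		mutex:           new(sync.Mutex),
		manifestStorage: make(map[string]*registry.ImageManifest),
		layerStorage:    make(map[string]Layer),
	}
	if err := objectStore.WriteManifest(name, tag, manifest); err != nil {
		return err
	}
	return Push(New(endpoint), objectStore, name, tag)
}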
--- client/client.go | 6 +- client/client_test.go | 238 ++++++++++++++++++++++++++++++++++++++++++ client/objectstore.go | 158 ++++++++++++++++++++++++++++ client/pull.go | 93 +++++++++++++++++ client/push.go | 95 +++++++++++++++++ test/test.go | 97 +++++++++++++++++ 6 files changed, 684 insertions(+), 3 deletions(-) create mode 100644 client/client_test.go create mode 100644 client/objectstore.go create mode 100644 client/pull.go create mode 100644 client/push.go create mode 100644 test/test.go diff --git a/client/client.go b/client/client.go index c4158695..2ea0e091 100644 --- a/client/client.go +++ b/client/client.go @@ -183,7 +183,7 @@ func (r *clientImpl) DeleteImage(name, tag string) error { } func (r *clientImpl) ListImageTags(name string) ([]string, error) { - response, err := http.Get(fmt.Sprintf("%s/v2/%s/tags", r.Endpoint, name)) + response, err := http.Get(fmt.Sprintf("%s/v2/%s/tags/list", r.Endpoint, name)) if err != nil { return nil, err } @@ -264,7 +264,7 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) { postRequest, err := http.NewRequest("POST", - fmt.Sprintf("%s/v2/%s/layer/%s/upload", r.Endpoint, name, tarsum), nil) + fmt.Sprintf("%s/v2/%s/layer/%s/upload/", r.Endpoint, name, tarsum), nil) if err != nil { return "", err } @@ -329,7 +329,7 @@ func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length in return err } - queryValues := new(url.Values) + queryValues := url.Values{} queryValues.Set("length", fmt.Sprint(length)) queryValues.Set(checksum.HashAlgorithm, checksum.Sum) putRequest.URL.RawQuery = queryValues.Encode() diff --git a/client/client_test.go b/client/client_test.go new file mode 100644 index 00000000..e900463a --- /dev/null +++ b/client/client_test.go @@ -0,0 +1,238 @@ +package client + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "sync" + "testing" + + "github.com/docker/docker-registry" + "github.com/docker/docker-registry/test" +) + +type testLayer struct { + tarSum string + contents []byte +} + +func TestPush(t *testing.T) { + name := "hello/world" + tag := "sometag" + testLayers := []testLayer{ + { + tarSum: "12345", + contents: []byte("some contents"), + }, + { + tarSum: "98765", + contents: []byte("some other contents"), + }, + } + uploadLocations := make([]string, len(testLayers)) + layers := make([]registry.FSLayer, len(testLayers)) + history := make([]registry.ManifestHistory, len(testLayers)) + + for i, layer := range testLayers { + uploadLocations[i] = fmt.Sprintf("/v2/%s/layer/%s/upload-location-%d", name, layer.tarSum, i) + layers[i] = registry.FSLayer{BlobSum: layer.tarSum} + history[i] = registry.ManifestHistory{V1Compatibility: layer.tarSum} + } + + manifest := ®istry.ImageManifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: layers, + History: history, + SchemaVersion: 1, + } + manifestBytes, err := json.Marshal(manifest) + + layerRequestResponseMappings := make([]test.RequestResponseMapping, 2*len(testLayers)) + for i, layer := range testLayers { + layerRequestResponseMappings[2*i] = test.RequestResponseMapping{ + Request: test.Request{ + Method: "POST", + Route: "/v2/" + name + "/layer/" + layer.tarSum + "/upload/", + }, + Responses: []test.Response{ + { + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {uploadLocations[i]}, + }), + }, + }, + } + layerRequestResponseMappings[2*i+1] = 
test.RequestResponseMapping{ + Request: test.Request{ + Method: "PUT", + Route: uploadLocations[i], + Body: layer.contents, + }, + Responses: []test.Response{ + { + StatusCode: http.StatusCreated, + }, + }, + } + } + + handler := test.NewHandler(append(layerRequestResponseMappings, test.RequestResponseMap{ + test.RequestResponseMapping{ + Request: test.Request{ + Method: "PUT", + Route: "/v2/" + name + "/image/" + tag, + Body: manifestBytes, + }, + Responses: []test.Response{ + { + StatusCode: http.StatusOK, + }, + }, + }, + }...)) + server := httptest.NewServer(handler) + client := New(server.URL) + objectStore := &memoryObjectStore{ + mutex: new(sync.Mutex), + manifestStorage: make(map[string]*registry.ImageManifest), + layerStorage: make(map[string]Layer), + } + + for _, layer := range testLayers { + l, err := objectStore.Layer(layer.tarSum) + if err != nil { + t.Fatal(err) + } + + writer, err := l.Writer() + if err != nil { + t.Fatal(err) + } + + writer.Write(layer.contents) + writer.Close() + } + + objectStore.WriteManifest(name, tag, manifest) + + err = Push(client, objectStore, name, tag) + if err != nil { + t.Fatal(err) + } +} + +func TestPull(t *testing.T) { + name := "hello/world" + tag := "sometag" + testLayers := []testLayer{ + { + tarSum: "12345", + contents: []byte("some contents"), + }, + { + tarSum: "98765", + contents: []byte("some other contents"), + }, + } + layers := make([]registry.FSLayer, len(testLayers)) + history := make([]registry.ManifestHistory, len(testLayers)) + + for i, layer := range testLayers { + layers[i] = registry.FSLayer{BlobSum: layer.tarSum} + history[i] = registry.ManifestHistory{V1Compatibility: layer.tarSum} + } + + manifest := ®istry.ImageManifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: layers, + History: history, + SchemaVersion: 1, + } + manifestBytes, err := json.Marshal(manifest) + + layerRequestResponseMappings := make([]test.RequestResponseMapping, len(testLayers)) + for i, layer := range testLayers { + layerRequestResponseMappings[i] = test.RequestResponseMapping{ + Request: test.Request{ + Method: "GET", + Route: "/v2/" + name + "/layer/" + layer.tarSum, + }, + Responses: []test.Response{ + { + StatusCode: http.StatusOK, + Body: layer.contents, + }, + }, + } + } + + handler := test.NewHandler(append(layerRequestResponseMappings, test.RequestResponseMap{ + test.RequestResponseMapping{ + Request: test.Request{ + Method: "GET", + Route: "/v2/" + name + "/image/" + tag, + }, + Responses: []test.Response{ + { + StatusCode: http.StatusOK, + Body: manifestBytes, + }, + }, + }, + }...)) + server := httptest.NewServer(handler) + client := New(server.URL) + objectStore := &memoryObjectStore{ + mutex: new(sync.Mutex), + manifestStorage: make(map[string]*registry.ImageManifest), + layerStorage: make(map[string]Layer), + } + + err = Pull(client, objectStore, name, tag) + if err != nil { + t.Fatal(err) + } + + m, err := objectStore.Manifest(name, tag) + if err != nil { + t.Fatal(err) + } + + mBytes, err := json.Marshal(m) + if err != nil { + t.Fatal(err) + } + + if string(mBytes) != string(manifestBytes) { + t.Fatal("Incorrect manifest") + } + + for _, layer := range testLayers { + l, err := objectStore.Layer(layer.tarSum) + if err != nil { + t.Fatal(err) + } + + reader, err := l.Reader() + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + layerBytes, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + if string(layerBytes) != string(layer.contents) { + t.Fatal("Incorrect layer") + } + } +} diff 
--git a/client/objectstore.go b/client/objectstore.go new file mode 100644 index 00000000..d8e2ac76 --- /dev/null +++ b/client/objectstore.go @@ -0,0 +1,158 @@ +package client + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/docker/docker-registry" +) + +var ( + // ErrLayerAlreadyExists is returned when attempting to create a layer with + // a tarsum that is already in use. + ErrLayerAlreadyExists = errors.New("Layer already exists") + + // ErrLayerLocked is returned when attempting to write to a layer which is + // currently being written to. + ErrLayerLocked = errors.New("Layer locked") +) + +// ObjectStore is an interface which is designed to approximate the docker +// engine storage. This interface is subject to change to conform to the +// future requirements of the engine. +type ObjectStore interface { + // Manifest retrieves the image manifest stored at the given repository name + // and tag + Manifest(name, tag string) (*registry.ImageManifest, error) + + // WriteManifest stores an image manifest at the given repository name and + // tag + WriteManifest(name, tag string, manifest *registry.ImageManifest) error + + // Layer returns a handle to a layer for reading and writing + Layer(blobSum string) (Layer, error) +} + +// Layer is a generic image layer interface. +// A Layer may only be written to once +type Layer interface { + // Reader returns an io.ReadCloser which reads the contents of the layer + Reader() (io.ReadCloser, error) + + // Writer returns an io.WriteCloser which may write the contents of the + // layer. This method may only be called once per Layer, and the contents + // are made available on Close + Writer() (io.WriteCloser, error) + + // Wait blocks until the Layer can be read from + Wait() error +} + +// memoryObjectStore is an in-memory implementation of the ObjectStore interface +type memoryObjectStore struct { + mutex *sync.Mutex + manifestStorage map[string]*registry.ImageManifest + layerStorage map[string]Layer +} + +func (objStore *memoryObjectStore) Manifest(name, tag string) (*registry.ImageManifest, error) { + objStore.mutex.Lock() + defer objStore.mutex.Unlock() + + manifest, ok := objStore.manifestStorage[name+":"+tag] + if !ok { + return nil, fmt.Errorf("No manifest found with Name: %q, Tag: %q", name, tag) + } + return manifest, nil +} + +func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *registry.ImageManifest) error { + objStore.mutex.Lock() + defer objStore.mutex.Unlock() + + objStore.manifestStorage[name+":"+tag] = manifest + return nil +} + +func (objStore *memoryObjectStore) Layer(blobSum string) (Layer, error) { + objStore.mutex.Lock() + defer objStore.mutex.Unlock() + + layer, ok := objStore.layerStorage[blobSum] + if !ok { + layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))} + objStore.layerStorage[blobSum] = layer + } + + return layer, nil +} + +type memoryLayer struct { + cond *sync.Cond + buffer *bytes.Buffer + written bool +} + +func (ml *memoryLayer) Writer() (io.WriteCloser, error) { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.buffer != nil { + if !ml.written { + return nil, ErrLayerLocked + } + return nil, ErrLayerAlreadyExists + } + + ml.buffer = new(bytes.Buffer) + return &memoryLayerWriter{cond: ml.cond, buffer: ml.buffer, done: &ml.written}, nil +} + +func (ml *memoryLayer) Reader() (io.ReadCloser, error) { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.buffer == nil { + return nil, fmt.Errorf("Layer has not been written to yet") + } + if 
!ml.written { + return nil, ErrLayerLocked + } + + return ioutil.NopCloser(bytes.NewReader(ml.buffer.Bytes())), nil +} + +func (ml *memoryLayer) Wait() error { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.buffer == nil { + return fmt.Errorf("No writer to wait on") + } + + for !ml.written { + ml.cond.Wait() + } + + return nil +} + +type memoryLayerWriter struct { + cond *sync.Cond + buffer *bytes.Buffer + done *bool +} + +func (mlw *memoryLayerWriter) Write(p []byte) (int, error) { + return mlw.buffer.Write(p) +} + +func (mlw *memoryLayerWriter) Close() error { + *mlw.done = true + mlw.cond.Broadcast() + return nil +} diff --git a/client/pull.go b/client/pull.go new file mode 100644 index 00000000..91c7283a --- /dev/null +++ b/client/pull.go @@ -0,0 +1,93 @@ +package client + +import ( + "fmt" + "io" + + log "github.com/Sirupsen/logrus" +) + +// Pull implements a client pull workflow for the image defined by the given +// name and tag pair, using the given ObjectStore for local manifest and layer +// storage +func Pull(c Client, objectStore ObjectStore, name, tag string) error { + manifest, err := c.GetImageManifest(name, tag) + if err != nil { + return err + } + log.WithField("manifest", manifest).Info("Pulled manifest") + + if len(manifest.FSLayers) != len(manifest.History) { + return fmt.Errorf("Length of history not equal to number of layers") + } + if len(manifest.FSLayers) == 0 { + return fmt.Errorf("Image has no layers") + } + + for _, fsLayer := range manifest.FSLayers { + layer, err := objectStore.Layer(fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to write local layer") + return err + } + + writer, err := layer.Writer() + if err == ErrLayerAlreadyExists { + log.WithField("layer", fsLayer).Info("Layer already exists") + continue + } + if err == ErrLayerLocked { + log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") + layer.Wait() + continue + } + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to write local layer") + return err + } + defer writer.Close() + + layerReader, length, err := c.GetImageLayer(name, fsLayer.BlobSum, 0) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to download layer") + return err + } + defer layerReader.Close() + + copied, err := io.Copy(writer, layerReader) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to download layer") + return err + } + if copied != int64(length) { + log.WithFields(log.Fields{ + "expected": length, + "written": copied, + "layer": fsLayer, + }).Warn("Wrote incorrect number of bytes for layer") + } + } + + err = objectStore.WriteManifest(name, tag, manifest) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "manifest": manifest, + }).Warn("Unable to write image manifest") + return err + } + + return nil +} diff --git a/client/push.go b/client/push.go new file mode 100644 index 00000000..4b9634e0 --- /dev/null +++ b/client/push.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "crypto/sha1" + "io" + "io/ioutil" + + "github.com/docker/docker-registry" + + log "github.com/Sirupsen/logrus" +) + +// Push implements a client push workflow for the image defined by the given +// name and tag pair, using the given ObjectStore for local manifest and layer +// storage +func Push(c Client, objectStore ObjectStore, name, tag string) error { + manifest, err := 
objectStore.Manifest(name, tag) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "name": name, + "tag": tag, + }).Info("No image found") + return err + } + + for _, fsLayer := range manifest.FSLayers { + layer, err := objectStore.Layer(fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + layerReader, err := layer.Reader() + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + location, err := c.InitiateLayerUpload(name, fsLayer.BlobSum) + if _, ok := err.(*registry.LayerAlreadyExistsError); ok { + log.WithField("layer", fsLayer).Info("Layer already exists") + continue + } + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + + layerBuffer := new(bytes.Buffer) + checksum := sha1.New() + teeReader := io.TeeReader(layerReader, checksum) + + _, err = io.Copy(layerBuffer, teeReader) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + err = c.UploadLayer(location, ioutil.NopCloser(layerBuffer), layerBuffer.Len(), + &registry.Checksum{HashAlgorithm: "sha1", Sum: string(checksum.Sum(nil))}, + ) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + } + + err = c.PutImageManifest(name, tag, manifest) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "manifest": manifest, + }).Warn("Unable to upload manifest") + return err + } + + return nil +} diff --git a/test/test.go b/test/test.go new file mode 100644 index 00000000..71588819 --- /dev/null +++ b/test/test.go @@ -0,0 +1,97 @@ +package test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// RequestResponseMap is a mapping from Requests to Responses +type RequestResponseMap []RequestResponseMapping + +// RequestResponseMapping defines an ordered list of Responses to be sent in +// response to a given Request +type RequestResponseMapping struct { + Request Request + Responses []Response +} + +// TODO(bbland): add support for request headers + +// Request is a simplified http.Request object +type Request struct { + // Method is the http method of the request, for example GET + Method string + + // Route is the http route of this request + Route string + + // Body is the byte contents of the http request + Body []byte +} + +func (r Request) String() string { + return fmt.Sprintf("%s %s\n%s", r.Method, r.Route, r.Body) +} + +// Response is a simplified http.Response object +type Response struct { + // StatusCode is the http status code of the Response + StatusCode int + + // Headers are the http headers of this Response + Headers http.Header + + // Body is the response body + Body []byte +} + +// testHandler is an http.Handler with a defined mapping from Request to an +// ordered list of Response objects +type testHandler struct { + responseMap map[string][]Response +} + +// NewHandler returns a new test handler that responds to defined requests +// with specified responses +// Each time a Request is received, the next Response is returned in the +// mapping, until no Responses are defined, at which point a 404 is sent back +func NewHandler(requestResponseMap RequestResponseMap) http.Handler { + responseMap := make(map[string][]Response) + for _, mapping
:= range requestResponseMap { + responseMap[mapping.Request.String()] = mapping.Responses + } + return &testHandler{responseMap: responseMap} +} + +func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + requestBody, _ := ioutil.ReadAll(r.Body) + request := Request{ + Method: r.Method, + Route: r.URL.Path, + Body: requestBody, + } + + responses, ok := app.responseMap[request.String()] + + if !ok || len(responses) == 0 { + http.NotFound(w, r) + return + } + + response := responses[0] + app.responseMap[request.String()] = responses[1:] + + responseHeader := w.Header() + for k, v := range response.Headers { + responseHeader[k] = v + } + + w.WriteHeader(response.StatusCode) + + io.Copy(w, bytes.NewReader(response.Body)) +} From 28b7b82e2da45316dfe358e9e1779ac98e968dec Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 17 Nov 2014 17:33:03 -0800 Subject: [PATCH 045/165] Adds sliding-window parallelization to Push/Pull operations A layer can only be pushed/pulled if the layer preceding it by the length of the push/pull window has been successfully pushed. An error returned from pushing or pulling any layer will cause the full operation to be aborted. --- client/pull.go | 141 +++++++++++++++++++++++++++++++------------------ client/push.go | 140 ++++++++++++++++++++++++++++++------------------ 2 files changed, 178 insertions(+), 103 deletions(-) diff --git a/client/pull.go b/client/pull.go index 91c7283a..75cc9af1 100644 --- a/client/pull.go +++ b/client/pull.go @@ -4,9 +4,16 @@ import ( "fmt" "io" + "github.com/docker/docker-registry" + log "github.com/Sirupsen/logrus" ) +// simultaneousLayerPullWindow is the size of the parallel layer pull window. +// A layer may not be pulled until the layer preceding it by the length of the +// pull window has been successfully pulled. +const simultaneousLayerPullWindow = 4 + // Pull implements a client pull workflow for the image defined by the given // name and tag pair, using the given ObjectStore for local manifest and layer // storage @@ -24,59 +31,28 @@ func Pull(c Client, objectStore ObjectStore, name, tag string) error { return fmt.Errorf("Image has no layers") } - for _, fsLayer := range manifest.FSLayers { - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err + errChans := make([]chan error, len(manifest.FSLayers)) + for i := range manifest.FSLayers { + errChans[i] = make(chan error) + } + + // Iterate over each layer in the manifest, simultaneously pulling no more + // than simultaneousLayerPullWindow layers at a time. If an error is + // received from a layer pull, we abort the pull.
+ for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPullWindow; i++ { + dependentLayer := i - simultaneousLayerPullWindow + if dependentLayer >= 0 { + err := <-errChans[dependentLayer] + if err != nil { + log.WithField("error", err).Warn("Pull aborted") + return err + } } - writer, err := layer.Writer() - if err == ErrLayerAlreadyExists { - log.WithField("layer", fsLayer).Info("Layer already exists") - continue - } - if err == ErrLayerLocked { - log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") - layer.Wait() - continue - } - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to write local layer") - return err - } - defer writer.Close() - - layerReader, length, err := c.GetImageLayer(name, fsLayer.BlobSum, 0) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to download layer") - return err - } - defer layerReader.Close() - - copied, err := io.Copy(writer, layerReader) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to download layer") - return err - } - if copied != int64(length) { - log.WithFields(log.Fields{ - "expected": length, - "written": copied, - "layer": fsLayer, - }).Warn("Wrote incorrect number of bytes for layer") + if i < len(manifest.FSLayers) { + go func(i int) { + errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]) + }(i) } } @@ -91,3 +67,66 @@ func Pull(c Client, objectStore ObjectStore, name, tag string) error { return nil } + +func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry.FSLayer) error { + log.WithField("layer", fsLayer).Info("Pulling layer") + + layer, err := objectStore.Layer(fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to write local layer") + return err + } + + writer, err := layer.Writer() + if err == ErrLayerAlreadyExists { + log.WithField("layer", fsLayer).Info("Layer already exists") + return nil + } + if err == ErrLayerLocked { + log.WithField("layer", fsLayer).Info("Layer download in progress, waiting") + layer.Wait() + return nil + } + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to write local layer") + return err + } + defer writer.Close() + + layerReader, length, err := c.GetImageLayer(name, fsLayer.BlobSum, 0) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to download layer") + return err + } + defer layerReader.Close() + + copied, err := io.Copy(writer, layerReader) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to download layer") + return err + } + if copied != int64(length) { + log.WithFields(log.Fields{ + "expected": length, + "written": copied, + "layer": fsLayer, + }).Warn("Wrote incorrect number of bytes for layer") + return fmt.Errorf( + "Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d", + fsLayer, length, copied, + ) + } + return nil +} diff --git a/client/push.go b/client/push.go index 4b9634e0..a1fb0e23 100644 --- a/client/push.go +++ b/client/push.go @@ -11,6 +11,13 @@ import ( log "github.com/Sirupsen/logrus" ) +// simultaneousLayerPushWindow is the size of the parallel layer push window. +// A layer may not be pushed until the layer preceding it by the length of the +// push window has been successfully pushed.
+const simultaneousLayerPushWindow = 4 + +type pushFunction func(fsLayer registry.FSLayer) error + // Push implements a client push workflow for the image defined by the given // name and tag pair, using the given ObjectStore for local manifest and layer // storage @@ -25,60 +32,28 @@ func Push(c Client, objectStore ObjectStore, name, tag string) error { return err } - for _, fsLayer := range manifest.FSLayers { - layer, err := objectStore.Layer(fsLayer.BlobSum) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err + errChans := make([]chan error, len(manifest.FSLayers)) + for i := range manifest.FSLayers { + errChans[i] = make(chan error) + } + + // Iterate over each layer in the manifest, simultaneously pushing no more + // than simultaneousLayerPushWindow layers at a time. If an error is + // received from a layer push, we abort the push. + for i := 0; i < len(manifest.FSLayers)+simultaneousLayerPushWindow; i++ { + dependentLayer := i - simultaneousLayerPushWindow + if dependentLayer >= 0 { + err := <-errChans[dependentLayer] + if err != nil { + log.WithField("error", err).Warn("Push aborted") + return err + } } - layerReader, err := layer.Reader() - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - - location, err := c.InitiateLayerUpload(name, fsLayer.BlobSum) - if _, ok := err.(*registry.LayerAlreadyExistsError); ok { - log.WithField("layer", fsLayer).Info("Layer already exists") - continue - } - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } - - layerBuffer := new(bytes.Buffer) - checksum := sha1.New() - teeReader := io.TeeReader(layerReader, checksum) - - _, err = io.Copy(layerBuffer, teeReader) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err - } - - err = c.UploadLayer(location, ioutil.NopCloser(layerBuffer), layerBuffer.Len(), - ®istry.Checksum{HashAlgorithm: "sha1", Sum: string(checksum.Sum(nil))}, - ) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err + if i < len(manifest.FSLayers) { + go func(i int) { + errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]) + }(i) } } @@ -93,3 +68,64 @@ func Push(c Client, objectStore ObjectStore, name, tag string) error { return nil } + +func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer registry.FSLayer) error { + log.WithField("layer", fsLayer).Info("Pushing layer") + + layer, err := objectStore.Layer(fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + layerReader, err := layer.Reader() + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + location, err := c.InitiateLayerUpload(name, fsLayer.BlobSum) + if _, ok := err.(*registry.LayerAlreadyExistsError); ok { + log.WithField("layer", fsLayer).Info("Layer already exists") + return nil + } + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + + layerBuffer := new(bytes.Buffer) + checksum := sha1.New() + teeReader := io.TeeReader(layerReader, checksum) + + _, 
err = io.Copy(layerBuffer, teeReader) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to read local layer") + return err + } + + err = c.UploadLayer(location, ioutil.NopCloser(layerBuffer), layerBuffer.Len(), + &registry.Checksum{HashAlgorithm: "sha1", Sum: string(checksum.Sum(nil))}, + ) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + + return nil +} From 2637e29e1884abfd60a80ab78b9c1823e732a985 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 17 Nov 2014 16:29:42 -0800 Subject: [PATCH 046/165] Initial implementation of registry LayerService This change contains the initial implementation of the LayerService to power layer push and pulls on the storagedriver. The interfaces presented in this package will be used by the http application to drive most features around efficient pulls and resumable pushes. The file storage/layer.go defines the interface interactions. LayerService is the root type and supports methods to access Layer and LayerUpload objects. Pull operations are supported with LayerService.Fetch and push operations are supported with LayerService.Upload and LayerService.Resume. Reads and writes of layers are split between Layer and LayerUpload, respectively. LayerService is implemented internally with the layerStore object, which takes a storagedriver.StorageDriver and a pathMapper instance. LayerUploadState is currently exported and will likely continue to be as the interaction between it and layerUploadStore are better understood. Likely, the layerUploadStore lifecycle and implementation will be deferred to the application. Image pushes and pulls will be implemented in a similar manner without the discrete, persistent upload. Much of this change is in place to get something running and working. Caveats of this change include the following: 1. Layer upload state storage is implemented on the local filesystem, separate from the storage driver. This must be replaced with the proper backend and other state storage. This can be removed when we implement resumable hashing and tarsum calculations to avoid backend roundtrips. 2. Error handling is rather bespoke at this time. The http API implementation should really dictate the error return structure for the future, so we intend to refactor this heavily to support these errors. We'd also like to collect production data to understand how failures happen in the system as a whole before moving to a particular edict around error handling. 3. The layerUploadStore, which manages layer upload storage and state, is not currently exported. This will likely end up being split, with the file management portion being pointed at the storagedriver and the state storage elsewhere. 4. Access Control provisions are nearly completely missing from this change. There are details around how layerindex lookup works that are related to access controls. As the auth portions of the new API take shape, these provisions will become clearer. Please see TODOs for details and individual recommendations.
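To make the upload interaction described above concrete, here is a minimal sketch (not a definitive implementation, assuming only the interfaces defined below in storage/layer.go) of a complete upload through LayerService: begin with Upload, stream the data through the LayerUpload handle, then seal the layer with Finish; the helper name and its arguments are illustrative only:

package storage

import "io"

// uploadSketch streams a layer's content through a LayerUpload handle.
// The digest is expected in "<algorithm>:<hex digest>" form, e.g. a sha256
// digest produced by NewDigest.
func uploadSketch(ls LayerService, name, tarSum string, content io.Reader, size int64, digest string) (Layer, error) {
	upload, err := ls.Upload(name, tarSum)
	if err != nil {
		return nil, err
	}

	if _, err := io.Copy(upload, content); err != nil {
		upload.Cancel() // abandon the partial upload on error
		return nil, err
	}

	// Finish validates the size and checksum against the uploaded contents
	// and returns a readable Layer handle.
	return upload.Finish(size, digest)
}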
--- storage/digest.go | 59 +++++ storage/layer.go | 96 ++++++++ storage/layer_test.go | 450 ++++++++++++++++++++++++++++++++++++ storage/layerreader.go | 172 ++++++++++++++ storage/layerstore.go | 203 ++++++++++++++++ storage/layerupload.go | 514 +++++++++++++++++++++++++++++++++++++++++ storage/services.go | 44 ++++ 7 files changed, 1538 insertions(+) create mode 100644 storage/digest.go create mode 100644 storage/layer.go create mode 100644 storage/layer_test.go create mode 100644 storage/layerreader.go create mode 100644 storage/layerstore.go create mode 100644 storage/layerupload.go create mode 100644 storage/services.go diff --git a/storage/digest.go b/storage/digest.go new file mode 100644 index 00000000..db5c884b --- /dev/null +++ b/storage/digest.go @@ -0,0 +1,59 @@ +package storage + +import ( + "fmt" + "hash" + "strings" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and provide quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg string, h hash.Hash) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) +} + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported by registry. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// ParseDigest parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func ParseDigest(s string) (Digest, error) { + parts := strings.SplitN(s, ":", 2) + if len(parts) != 2 { + return "", ErrDigestInvalidFormat + } + + switch parts[0] { + case "sha256": + break + default: + return "", ErrDigestUnsupported + } + + return Digest(s), nil +} + +// Algorithm returns the algorithm portion of the digest. +func (d Digest) Algorithm() string { + return strings.SplitN(string(d), ":", 2)[0] +} + +// Hex returns the hex digest portion of the digest. +func (d Digest) Hex() string { + return strings.SplitN(string(d), ":", 2)[1] +} diff --git a/storage/layer.go b/storage/layer.go new file mode 100644 index 00000000..bae69701 --- /dev/null +++ b/storage/layer.go @@ -0,0 +1,96 @@ +package storage + +import ( + "fmt" + "io" + "time" +) + +// LayerService provides operations on layer files in a backend storage. +type LayerService interface { + // Exists returns true if the layer exists. + Exists(tarSum string) (bool, error) + + // Fetch the layer identified by TarSum. + Fetch(tarSum string) (Layer, error) + + // Upload begins a layer upload, returning a handle. If the layer upload + // is already in progress or the layer has already been uploaded, this + // will return an error. + Upload(name, tarSum string) (LayerUpload, error) + + // Resume continues an in progress layer upload, returning the current + // state of the upload. + Resume(name, tarSum, uuid string) (LayerUpload, error) +} + +// Layer provides a readable and seekable layer object. Typically, +// implementations are *not* goroutine safe. +type Layer interface { + // http.ServeContent requires an efficient implementation of + // ReadSeeker.Seek(0, os.SEEK_END).
+ io.ReadSeeker + io.Closer + + // Name returns the repository under which this layer is linked. + Name() string // TODO(stevvooe): struggling with nomenclature: should this be "repo" or "name"? + + // TarSum returns the unique tarsum of the layer. + TarSum() string + + // CreatedAt returns the time this layer was created. Until we implement + // a Stat call on storagedriver, this just returns the zero time. + CreatedAt() time.Time +} + +// LayerUpload provides a handle for working with in-progress uploads. + // Instances can be obtained from the LayerService.Upload and +// LayerService.Resume. +type LayerUpload interface { + io.WriteCloser + + // UUID returns the identifier for this upload. + UUID() string + + // Name of the repository under which the layer will be linked. + Name() string + + // TarSum identifier of the proposed layer. Resulting data must match this + // tarsum. + TarSum() string + + // Offset returns the position of the last byte written to this layer. + Offset() int64 + + // Finish marks the upload as completed, returning a valid handle to the + // uploaded layer. The final size and checksum are validated against the + // contents of the uploaded layer. The checksum should be provided in the + // format <algorithm>:<hex digest>. + Finish(size int64, digest string) (Layer, error) + + // Cancel the layer upload process. + Cancel() error +} + +var ( + // ErrLayerUnknown returned when layer cannot be found. + ErrLayerUnknown = fmt.Errorf("unknown layer") + + // ErrLayerExists returned when layer already exists. + ErrLayerExists = fmt.Errorf("layer exists") + + // ErrLayerTarSumVersionUnsupported when tarsum is unsupported version. + ErrLayerTarSumVersionUnsupported = fmt.Errorf("unsupported tarsum version") + + // ErrLayerUploadUnknown returned when upload is not found. + ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown") + + // ErrLayerInvalidChecksum returned when checksum/digest check fails. + ErrLayerInvalidChecksum = fmt.Errorf("invalid layer checksum") + + // ErrLayerInvalidTarsum returned when tarsum check fails. + ErrLayerInvalidTarsum = fmt.Errorf("invalid layer tarsum") + + // ErrLayerInvalidLength returned when length check fails. + ErrLayerInvalidLength = fmt.Errorf("invalid layer length") +) diff --git a/storage/layer_test.go b/storage/layer_test.go new file mode 100644 index 00000000..72187810 --- /dev/null +++ b/storage/layer_test.go @@ -0,0 +1,450 @@ +package storage + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "os" + "testing" + "time" + + "github.com/docker/docker/pkg/tarsum" + + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/inmemory" +) + +// TestSimpleLayerUpload covers the layer upload process, exercising common +// error paths that might be seen during an upload.
+func TestSimpleLayerUpload(t *testing.T) { + randomDataReader, tarSum, err := createRandomReader() + + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + uploadStore, err := newTemporaryLocalFSLayerUploadStore() + if err != nil { + t.Fatalf("error allocating upload store: %v", err) + } + + imageName := "foo/bar" + driver := inmemory.New() + + ls := &layerStore{ + driver: driver, + pathMapper: &pathMapper{ + root: "/storage/testing", + version: storagePathVersion, + }, + uploadStore: uploadStore, + } + + h := sha256.New() + rd := io.TeeReader(randomDataReader, h) + + layerUpload, err := ls.Upload(imageName, tarSum) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Cancel the upload then restart it + if err := layerUpload.Cancel(); err != nil { + t.Fatalf("unexpected error during upload cancellation: %v", err) + } + + // Do a resume, get unknown upload + layerUpload, err = ls.Resume(imageName, tarSum, layerUpload.UUID()) + if err != ErrLayerUploadUnknown { + t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) + } + + // Restart! + layerUpload, err = ls.Upload(imageName, tarSum) + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(layerUpload, rd) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("layer data write incomplete") + } + + if layerUpload.Offset() != nn { + t.Fatalf("layerUpload not updated with correct offset: %v != %v", layerUpload.Offset(), nn) + } + layerUpload.Close() + + // Do a resume, for good fun + layerUpload, err = ls.Resume(imageName, tarSum, layerUpload.UUID()) + if err != nil { + t.Fatalf("unexpected error resuming upload: %v", err) + } + + digest := NewDigest("sha256", h) + layer, err := layerUpload.Finish(randomDataSize, string(digest)) + + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // After finishing an upload, it should no longer exist. + if _, err := ls.Resume(imageName, tarSum, layerUpload.UUID()); err != ErrLayerUploadUnknown { + t.Fatalf("expected layer upload to be unknown, got %v", err) + } + + // Test for existence. + exists, err := ls.Exists(layer.TarSum()) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v", err) + } + + if !exists { + t.Fatalf("layer should now exist") + } + + h.Reset() + nn, err = io.Copy(h, layer) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if NewDigest("sha256", h) != digest { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", NewDigest("sha256", h), digest) + } +} + +// TestSimpleLayerRead just creates a simple layer file and ensures that basic +// open, read, seek, read works. More specific edge cases should be covered in +// other tests. +func TestSimpleLayerRead(t *testing.T) { + imageName := "foo/bar" + driver := inmemory.New() + ls := &layerStore{ + driver: driver, + pathMapper: &pathMapper{ + root: "/storage/testing", + version: storagePathVersion, + }, + } + + randomLayerReader, tarSum, err := createRandomReader() + if err != nil { + t.Fatalf("error creating random data: %v", err) + } + + // Test for existence.
+ exists, err := ls.Exists(tarSum) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v", err) + } + + if exists { + t.Fatalf("layer should not exist") + } + + // Try to get the layer and make sure we get a not found error + layer, err := ls.Fetch(tarSum) + if err == nil { + t.Fatalf("error expected fetching unknown layer") + } + + if err != ErrLayerUnknown { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } else { + err = nil + } + + randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, tarSum, randomLayerReader) + if err != nil { + t.Fatalf("unexpected error writing test layer: %v", err) + } + + randomLayerSize, err := seekerSize(randomLayerReader) + if err != nil { + t.Fatalf("error getting seeker size for random layer: %v", err) + } + + layer, err = ls.Fetch(tarSum) + if err != nil { + t.Fatal(err) + } + defer layer.Close() + + // Now check the sha digest and ensure it's the same + h := sha256.New() + nn, err := io.Copy(h, layer) + if err != nil && err != io.EOF { + t.Fatalf("unexpected error copying to hash: %v", err) + } + + if nn != randomLayerSize { + t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize) + } + + digest := NewDigest("sha256", h) + if digest != randomLayerDigest { + t.Fatalf("fetched digest does not match: %q != %q", digest, randomLayerDigest) + } + + // Now seek back the layer, read the whole thing and check against randomLayerData + offset, err := layer.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking layer: %v", err) + } + + if offset != 0 { + t.Fatalf("seek failed: expected 0 offset, got %d", offset) + } + + p, err := ioutil.ReadAll(layer) + if err != nil { + t.Fatalf("error reading all of layer: %v", err) + } + + if len(p) != int(randomLayerSize) { + t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize) + } + + // Reset the randomLayerReader and read back the buffer + _, err = randomLayerReader.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error resetting layer reader: %v", err) + } + + randomLayerData, err := ioutil.ReadAll(randomLayerReader) + if err != nil { + t.Fatalf("random layer read failed: %v", err) + } + + if !bytes.Equal(p, randomLayerData) { + t.Fatalf("layer data not equal") + } +} + +func TestLayerReaderSeek(t *testing.T) { + // TODO(stevvooe): Ensure that all relative seeks work as advertised. + // Readers must close and re-open on command. This is important to support + // resumable and concurrent downloads via HTTP range requests. +} + +// TestLayerReadErrors covers the various error return types for different +// conditions that can arise when reading a layer. +func TestLayerReadErrors(t *testing.T) { + // TODO(stevvooe): We need to cover error return types, driven by the + // errors returned via the HTTP API. For now, here is an incomplete list: + // + // 1. Layer Not Found: returned when layer is not found or access is + // denied. + // 2. Layer Unavailable: returned when link references are unresolved, + // but layer is known to the registry. + // 3. Layer Invalid: This may be split into more errors, but should be + // returned when the name or tarsum does not reference a valid layer. We + // may also need something to communicate layer verification errors + // for the inline tarsum check. + // 4. Timeout: timeouts to backend. Need to better understand these + // failure cases and how the storage driver propagates these errors + // up the stack.
+} + +// writeRandomLayer creates a random layer under name and tarSum using driver +// and pathMapper. An io.ReadSeeker with the data is returned, along with the +// sha256 hex digest. +func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum string, digest Digest, err error) { + reader, tarSum, err := createRandomReader() + if err != nil { + return nil, "", "", err + } + + // Now, actually create the layer. + randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader)) + + if _, err := reader.Seek(0, os.SEEK_SET); err != nil { + return nil, "", "", err + } + + return reader, tarSum, randomLayerDigest, err +} + +// seekerSize seeks to the end of seeker, checks the size and returns it to +// the original state, returning the size. The state of the seeker should be +// treated as unknown if an error is returned. +func seekerSize(seeker io.ReadSeeker) (int64, error) { + current, err := seeker.Seek(0, os.SEEK_CUR) + if err != nil { + return 0, err + } + + end, err := seeker.Seek(0, os.SEEK_END) + if err != nil { + return 0, err + } + + resumed, err := seeker.Seek(current, os.SEEK_SET) + if err != nil { + return 0, err + } + + if resumed != current { + return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") + } + + return end, nil +} + +// createRandomReader returns a random read seeker and its tarsum. The +// returned content will be a valid tar file with a random number of files and +// content. +func createRandomReader() (rs io.ReadSeeker, tarSum string, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. + header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := rand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + reader := bytes.NewReader(target.Bytes()) + + // A tar builder that supports tarsum inline calculation would be awesome + // here. + ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1) + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(ioutil.Discard, ts) + if nn != int64(len(target.Bytes())) { + return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes())) + } + + if err != nil { + return nil, "", err + } + + return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil +} + +// writeTestLayer creates a simple test layer in the provided driver under +// tarsum, returning the string digest.
This is implemented piecemeal and
+// should probably be replaced by the uploader when it's ready.
+func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name, tarSum string, content io.Reader) (Digest, error) {
+ h := sha256.New()
+ rd := io.TeeReader(content, h)
+
+ p, err := ioutil.ReadAll(rd)
+
+ if err != nil {
+ return "", err
+ }
+
+ digest := NewDigest("sha256", h)
+
+ blobPath, err := pathMapper.path(blobPathSpec{
+ alg: digest.Algorithm(),
+ digest: digest.Hex(),
+ })
+
+ if err != nil {
+ return "", err
+ }
+
+ if err := driver.PutContent(blobPath, p); err != nil {
+ return "", err
+ }
+
+ layerIndexLinkPath, err := pathMapper.path(layerIndexLinkPathSpec{
+ tarSum: tarSum,
+ })
+
+ if err != nil {
+ return "", err
+ }
+
+ layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
+ name: name,
+ tarSum: tarSum,
+ })
+
+ if err != nil {
+ return "", err
+ }
+
+ if err := driver.PutContent(layerLinkPath, []byte(digest)); err != nil {
+ return "", err
+ }
+
+ if err = driver.PutContent(layerIndexLinkPath, []byte(name)); err != nil {
+ return "", err
+ }
+
+ return digest, nil
+}
diff --git a/storage/layerreader.go b/storage/layerreader.go
new file mode 100644
index 00000000..df05c367
--- /dev/null
+++ b/storage/layerreader.go
@@ -0,0 +1,172 @@
+package storage
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// layerReader implements Layer and provides facilities for reading and
+// seeking.
+type layerReader struct {
+ layerStore *layerStore
+ rc io.ReadCloser
+ brd *bufio.Reader
+
+ name string // repo name of this layer
+ tarSum string
+ path string
+ createdAt time.Time
+
+ // offset is the current read offset
+ offset int64
+
+ // size is the total layer size, if available.
+ size int64
+
+ closedErr error // terminal error, if set, reader is closed
+}
+
+var _ Layer = &layerReader{}
+
+func (lrs *layerReader) Name() string {
+ return lrs.name
+}
+
+func (lrs *layerReader) TarSum() string {
+ return lrs.tarSum
+}
+
+func (lrs *layerReader) CreatedAt() time.Time {
+ return lrs.createdAt
+}
+
+func (lrs *layerReader) Read(p []byte) (n int, err error) {
+ if err := lrs.closed(); err != nil {
+ return 0, err
+ }
+
+ rd, err := lrs.reader()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err = rd.Read(p)
+ lrs.offset += int64(n)
+
+ // Return io.EOF if we reach the end of the layer.
+ if err == nil && lrs.offset >= lrs.size {
+ err = io.EOF
+ }
+
+ // TODO(stevvooe): More error checking is required here. If the reader
+ // times out for some reason, we should reset the reader so we re-open the
+ // connection.
+
+ return n, err
+}
+
+func (lrs *layerReader) Seek(offset int64, whence int) (int64, error) {
+ if err := lrs.closed(); err != nil {
+ return 0, err
+ }
+
+ var err error
+ newOffset := lrs.offset
+
+ switch whence {
+ case os.SEEK_CUR:
+ newOffset += offset
+ case os.SEEK_END:
+ newOffset = lrs.size + offset
+ case os.SEEK_SET:
+ newOffset = offset
+ }
+
+ if newOffset < 0 {
+ err = fmt.Errorf("cannot seek to negative position")
+ } else if newOffset >= lrs.size {
+ err = fmt.Errorf("cannot seek past end of layer")
+ } else {
+ if lrs.offset != newOffset {
+ lrs.resetReader()
+ }
+
+ // No problems, set the offset.
+ lrs.offset = newOffset
+ }
+
+ return lrs.offset, err
+}
+
+// Close the layer. Should be called when the resource is no longer needed.
+func (lrs *layerReader) Close() error {
+ if lrs.closedErr != nil {
+ return lrs.closedErr
+ }
+ // TODO(sday): Must export this error.
+ lrs.closedErr = fmt.Errorf("layer closed")
+
+ // close and release reader chain
+ if lrs.rc != nil {
+ lrs.rc.Close()
+ lrs.rc = nil
+ }
+ lrs.brd = nil
+
+ // The first Close succeeds; subsequent operations see closedErr.
+ return nil
+}
+
+// reader prepares the current reader at the lrs offset, ensuring it's
+// buffered and ready to go.
+func (lrs *layerReader) reader() (io.Reader, error) {
+ if err := lrs.closed(); err != nil {
+ return nil, err
+ }
+
+ if lrs.rc != nil {
+ return lrs.brd, nil
+ }
+
+ // If we don't have a reader, open one up.
+ rc, err := lrs.layerStore.driver.ReadStream(lrs.path, uint64(lrs.offset))
+
+ if err != nil {
+ return nil, err
+ }
+
+ lrs.rc = rc
+
+ if lrs.brd == nil {
+ // TODO(stevvooe): Set an optimal buffer size here. We'll have to
+ // understand the latency characteristics of the underlying network to
+ // set this correctly, so we may want to leave it to the driver. For
+ // out of process drivers, we'll have to optimize this buffer size for
+ // local communication.
+ lrs.brd = bufio.NewReader(lrs.rc)
+ } else {
+ lrs.brd.Reset(lrs.rc)
+ }
+
+ return lrs.brd, nil
+}
+
+// resetReader resets the reader, forcing the read method to open up a new
+// connection and rebuild the buffered reader. This should be called when the
+// offset and the reader would otherwise be out of sync, such as during a seek
+// operation.
+func (lrs *layerReader) resetReader() {
+ if err := lrs.closed(); err != nil {
+ return
+ }
+ if lrs.rc != nil {
+ lrs.rc.Close()
+ lrs.rc = nil
+ }
+}
+
+func (lrs *layerReader) closed() error {
+ return lrs.closedErr
+}
diff --git a/storage/layerstore.go b/storage/layerstore.go
new file mode 100644
index 00000000..e2821a83
--- /dev/null
+++ b/storage/layerstore.go
@@ -0,0 +1,203 @@
+package storage
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker-registry/storagedriver"
+)
+
+type layerStore struct {
+ driver storagedriver.StorageDriver
+ pathMapper *pathMapper
+ uploadStore layerUploadStore
+}
+
+func (ls *layerStore) Exists(tarSum string) (bool, error) {
+ // Because this implementation just follows blob links, an existence check
+ // is pretty cheap by starting and closing a fetch.
+ layer, err := ls.Fetch(tarSum)
+
+ if err != nil {
+ if err == ErrLayerUnknown {
+ return false, nil
+ }
+
+ return false, err
+ }
+
+ // Close the fetched layer so the underlying reader is released.
+ layer.Close()
+
+ return true, nil
+}
+
+func (ls *layerStore) Fetch(tarSum string) (Layer, error) {
+ repos, err := ls.resolveContainingRepositories(tarSum)
+
+ if err != nil {
+ // TODO(stevvooe): Unknown tarsum error: need to wrap.
+ return nil, err
+ }
+
+ // TODO(stevvooe): Access control for layer pulls needs to happen here: we
+ // have a list of repos that "own" the tarsum that need to be checked
+ // against the list of repos to which we have pull access. The argument
+ // repos needs to be filtered against that access list.
+
+ name, blobPath, err := ls.resolveBlobPath(repos, tarSum)
+
+ if err != nil {
+ // TODO(stevvooe): Map this error correctly, perhaps in the callee.
+ return nil, err
+ }
+
+ p, err := ls.pathMapper.path(blobPath)
+ if err != nil {
+ return nil, err
+ }
+
+ // Grab the size of the layer file, ensuring that it exists, among other
+ // things.
+ size, err := ls.driver.CurrentSize(p)
+
+ if err != nil {
+ // TODO(stevvooe): Handle blob/path does not exist here.
+ // TODO(stevvooe): Get a better understanding of the error cases here
+ // that don't stem from unknown path.
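+ //
+ // One plausible mapping, sketched here with the error types already
+ // used in this package:
+ //
+ //	if _, ok := err.(storagedriver.PathNotFoundError); ok {
+ //		return nil, ErrLayerUnknown
+ //	}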
+ return nil, err
+ }
+
+ // Build the layer reader and return to the client.
+ layer := &layerReader{
+ layerStore: ls,
+ path: p,
+ name: name,
+ tarSum: tarSum,
+
+ // TODO(stevvooe): Storage backend does not support modification time
+ // queries yet. Layers "never" change, so just return the zero value.
+ createdAt: time.Time{},
+
+ offset: 0,
+ size: int64(size),
+ }
+
+ return layer, nil
+}
+
+// Upload begins a layer upload, returning a handle. If the layer upload
+// is already in progress or the layer has already been uploaded, this
+// will return an error.
+func (ls *layerStore) Upload(name, tarSum string) (LayerUpload, error) {
+ exists, err := ls.Exists(tarSum)
+ if err != nil {
+ return nil, err
+ }
+
+ if exists {
+ // TODO(stevvoe): This looks simple now, but we really should only
+ // return the layer exists error when the layer exists AND the current
+ // client has access to the layer. If the client doesn't have access
+ // to the layer, the upload should proceed.
+ return nil, ErrLayerExists
+ }
+
+ // NOTE(stevvooe): Consider the issues with allowing concurrent uploads of
+ // the same layer. Should it be disallowed? For now, we allow both
+ // parties to proceed and the first one to finish uploads the layer.
+
+ lus, err := ls.uploadStore.New(name, tarSum)
+ if err != nil {
+ return nil, err
+ }
+
+ return ls.newLayerUpload(lus), nil
+}
+
+// Resume continues an in-progress layer upload, returning the current
+// state of the upload.
+func (ls *layerStore) Resume(name, tarSum, uuid string) (LayerUpload, error) {
+ lus, err := ls.uploadStore.GetState(uuid)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return ls.newLayerUpload(lus), nil
+}
+
+// newLayerUpload allocates a new upload controller with the given state.
+func (ls *layerStore) newLayerUpload(lus LayerUploadState) LayerUpload {
+ return &layerUploadController{
+ LayerUploadState: lus,
+ layerStore: ls,
+ uploadStore: ls.uploadStore,
+ }
+}
+
+func (ls *layerStore) resolveContainingRepositories(tarSum string) ([]string, error) {
+ // Lookup the layer link in the index by tarsum id.
+ layerIndexLinkPath, err := ls.pathMapper.path(layerIndexLinkPathSpec{tarSum: tarSum})
+ if err != nil {
+ return nil, err
+ }
+
+ layerIndexLinkContent, err := ls.driver.GetContent(layerIndexLinkPath)
+ if err != nil {
+ switch err := err.(type) {
+ case storagedriver.PathNotFoundError:
+ return nil, ErrLayerUnknown
+ default:
+ return nil, err
+ }
+ }
+
+ results := strings.Split(string(layerIndexLinkContent), "\n")
+
+ // clean these up
+ for i, result := range results {
+ results[i] = strings.TrimSpace(result)
+ }
+
+ return results, nil
+}
+
+// resolveBlobPath looks up the tarSum in the various repos to find the blob
+// link, returning the repo name and blob path spec or an error on failure.
+func (ls *layerStore) resolveBlobPath(repos []string, tarSum string) (name string, bps blobPathSpec, err error) {
+
+ for _, repo := range repos {
+ pathSpec := layerLinkPathSpec{name: repo, tarSum: tarSum}
+ layerLinkPath, err := ls.pathMapper.path(pathSpec)
+
+ if err != nil {
+ // TODO(stevvooe): This looks very lazy, may want to collect these
+ // errors and report them if we exit this for loop without
+ // resolving the blob id.
+ logrus.Debugf("error building layerLinkPath (%v): %v", pathSpec, err)
+ continue
+ }
+
+ layerLinkContent, err := ls.driver.GetContent(layerLinkPath)
+ if err != nil {
+ logrus.Debugf("error getting layerLink content (%v): %v", pathSpec, err)
+ continue
+ }
+
+ // Yay!
We've resolved our blob id and we're ready to go.
+ parts := strings.SplitN(strings.TrimSpace(string(layerLinkContent)), ":", 2)
+
+ if len(parts) != 2 {
+ return "", bps, fmt.Errorf("invalid blob reference: %q", string(layerLinkContent))
+ }
+
+ bp := blobPathSpec{alg: parts[0], digest: parts[1]}
+
+ return repo, bp, nil
+ }
+
+ // TODO(stevvooe): Map this error to repo not found, but it basically
+ // means we exited the loop above without finding a blob link.
+ return "", bps, fmt.Errorf("unable to resolve blob id for repos=%v and tarSum=%q", repos, tarSum)
+}
diff --git a/storage/layerupload.go b/storage/layerupload.go
new file mode 100644
index 00000000..7ad32d75
--- /dev/null
+++ b/storage/layerupload.go
@@ -0,0 +1,514 @@
+package storage
+
+import (
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "code.google.com/p/go-uuid/uuid"
+
+ "github.com/docker/docker-registry/storagedriver"
+ "github.com/docker/docker/pkg/tarsum"
+)
+
+// LayerUploadState captures the serializable state of the layer upload.
+type LayerUploadState struct {
+ // Name is the primary repository under which the layer will be linked.
+ Name string
+
+ // TarSum identifies the target layer. Provided by the client. If the
+ // resulting tarSum does not match this value, an error should be
+ // returned.
+ TarSum string
+
+ // UUID identifies the upload.
+ UUID string
+
+ // Offset contains the current progress of the upload.
+ Offset int64
+}
+
+// layerUploadController is used to control the various aspects of resumable
+// layer upload. It implements the LayerUpload interface.
+type layerUploadController struct {
+ LayerUploadState
+
+ layerStore *layerStore
+ uploadStore layerUploadStore
+ fp layerFile
+ err error // terminal error, if set, controller is closed
+}
+
+// layerFile documents the interface used while writing layer files, similar
+// to *os.File. This is separate from layerReader, for now, because we want to
+// store uploads on the local file system until we have write-through hashing
+// support. They should be combined once this is worked out.
+type layerFile interface {
+ io.WriteSeeker
+ io.Reader
+ io.Closer
+
+ // Sync commits the contents of the writer to storage.
+ Sync() (err error)
+}
+
+// layerUploadStore provides storage for temporary files and upload state of
+// layers. This will be used by the LayerService to manage the state of ongoing
+// uploads. This interface will definitely change and will most likely end up
+// being exported to the app layer. Move to layer.go when it's ready to go.
+type layerUploadStore interface {
+ New(name, tarSum string) (LayerUploadState, error)
+ Open(uuid string) (layerFile, error)
+ GetState(uuid string) (LayerUploadState, error)
+ SaveState(lus LayerUploadState) error
+ DeleteState(uuid string) error
+}
+
+var _ LayerUpload = &layerUploadController{}
+
+// Name of the repository under which the layer will be linked.
+func (luc *layerUploadController) Name() string {
+ return luc.LayerUploadState.Name
+}
+
+// TarSum identifier of the proposed layer. Resulting data must match this
+// tarsum.
+func (luc *layerUploadController) TarSum() string {
+ return luc.LayerUploadState.TarSum
+}
+
+// UUID returns the identifier for this upload.
+func (luc *layerUploadController) UUID() string {
+ return luc.LayerUploadState.UUID
+}
+
+// Offset returns the position of the last byte written to this layer.
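+//
+// A resuming client might combine this with Resume (sketch; "layers" is an
+// assumed LayerService handle and error handling is elided):
+//
+//	upload, _ := layers.Resume(name, tarSum, uuid)
+//	rd.Seek(upload.Offset(), os.SEEK_SET) // skip bytes already accepted
+//	io.Copy(upload, rd)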
+func (luc *layerUploadController) Offset() int64 {
+ return luc.LayerUploadState.Offset
+}
+
+// Finish marks the upload as completed, returning a valid handle to the
+// uploaded layer. The final size and checksum are validated against the
+// contents of the uploaded layer. The checksum should be provided in the
+// format <algorithm>:<hex digest>.
+func (luc *layerUploadController) Finish(size int64, digestStr string) (Layer, error) {
+
+ // This section is going to be pretty ugly now. We will have to read the
+ // file twice. First, to get the tarsum and checksum. When those are
+ // available, and validated, we will upload it to the blob store and link
+ // it into the repository. In the future, we need to use resumable hash
+ // calculations for tarsum and checksum that can be calculated during the
+ // upload. This will allow us to cut the data directly into a temporary
+ // directory in the storage backend.
+
+ fp, err := luc.file()
+
+ if err != nil {
+ // Cleanup?
+ return nil, err
+ }
+
+ digest, err := ParseDigest(digestStr)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := luc.validateLayer(fp, size, digest); err != nil {
+ // Cleanup?
+ return nil, err
+ }
+
+ if err := luc.writeLayer(fp, size, digest); err != nil {
+ // Cleanup?
+ return nil, err
+ }
+
+ // Yes! We have written some layer data. Let's make it visible. Link the
+ // layer blob into the repository.
+ if err := luc.linkLayer(digest); err != nil {
+ return nil, err
+ }
+
+ // Ok, the upload has completed. Delete the state.
+ if err := luc.uploadStore.DeleteState(luc.UUID()); err != nil {
+ // Can we ignore this error?
+ return nil, err
+ }
+
+ return luc.layerStore.Fetch(luc.TarSum())
+}
+
+// Cancel the layer upload process.
+func (luc *layerUploadController) Cancel() error {
+ if err := luc.layerStore.uploadStore.DeleteState(luc.UUID()); err != nil {
+ return err
+ }
+
+ return luc.Close()
+}
+
+func (luc *layerUploadController) Write(p []byte) (int, error) {
+ wr, err := luc.file()
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := wr.Write(p)
+
+ // Because we expect the reported offset to be consistent with the storage
+ // state, unfortunately, we need to Sync on every call to write.
+ if err := wr.Sync(); err != nil {
+ // Effectively, ignore the write state if the Sync fails. Report that
+ // no bytes were written and seek back to the starting offset.
+ offset, seekErr := wr.Seek(luc.Offset(), os.SEEK_SET)
+ if seekErr != nil {
+ // What do we do here? Quite disastrous.
+ luc.reset()
+
+ return 0, fmt.Errorf("multiple errors encountered after Sync + Seek: %v then %v", err, seekErr)
+ }
+
+ if offset != luc.Offset() {
+ return 0, fmt.Errorf("unexpected offset after seek")
+ }
+
+ return 0, err
+ }
+
+ luc.LayerUploadState.Offset += int64(n)
+
+ if err := luc.uploadStore.SaveState(luc.LayerUploadState); err != nil {
+ // TODO(stevvooe): This failure case may require more thought.
+ return n, err
+ }
+
+ return n, err
+}
+
+func (luc *layerUploadController) Close() error {
+ if luc.err != nil {
+ return luc.err
+ }
+
+ if luc.fp != nil {
+ luc.err = luc.fp.Close()
+ }
+
+ return luc.err
+}
+
+func (luc *layerUploadController) file() (layerFile, error) {
+ if luc.fp != nil {
+ return luc.fp, nil
+ }
+
+ fp, err := luc.uploadStore.Open(luc.UUID())
+
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(stevvooe): We may need a more aggressive check here to ensure that
+ // the file length is equal to the current offset.
We may want to sync the
+ // offset before returning the layer upload to the client so it can be
+ // validated before proceeding with any writes.
+
+ // Seek to the current layer offset for good measure.
+ if _, err = fp.Seek(luc.Offset(), os.SEEK_SET); err != nil {
+ return nil, err
+ }
+
+ luc.fp = fp
+
+ return luc.fp, nil
+}
+
+// reset closes and drops the current writer.
+func (luc *layerUploadController) reset() {
+ if luc.fp != nil {
+ luc.fp.Close()
+ luc.fp = nil
+ }
+}
+
+// validateLayer runs several checks on the layer file to ensure its validity.
+// This is currently very expensive and relies on fast io and fast seek.
+func (luc *layerUploadController) validateLayer(fp layerFile, size int64, digest Digest) error {
+ // First, seek to the end of the file, checking the size is as expected.
+ end, err := fp.Seek(0, os.SEEK_END)
+ if err != nil {
+ return err
+ }
+
+ if end != size {
+ return ErrLayerInvalidLength
+ }
+
+ // Now seek back to start and take care of tarsum and checksum.
+ if _, err := fp.Seek(0, os.SEEK_SET); err != nil {
+ return err
+ }
+
+ version, err := tarsum.GetVersionFromTarsum(luc.TarSum())
+ if err != nil {
+ return ErrLayerTarSumVersionUnsupported
+ }
+
+ // We only support tarsum version 1 for now.
+ if version != tarsum.Version1 {
+ return ErrLayerTarSumVersionUnsupported
+ }
+
+ ts, err := tarsum.NewTarSum(fp, true, tarsum.Version1)
+ if err != nil {
+ return err
+ }
+
+ h := sha256.New()
+
+ // Pull the layer file through by writing it to a checksum.
+ nn, err := io.Copy(h, ts)
+
+ if nn != size {
+ return fmt.Errorf("bad read while finishing upload(%s) %v: %v != %v, err=%v", luc.UUID(), fp, nn, size, err)
+ }
+
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ calculatedDigest := NewDigest("sha256", h)
+
+ // Compare the digests!
+ if digest != calculatedDigest {
+ return ErrLayerInvalidChecksum
+ }
+
+ // Compare the tarsums!
+ if ts.Sum(nil) != luc.TarSum() {
+ return ErrLayerInvalidTarsum
+ }
+
+ return nil
+}
+
+// writeLayer actually writes the layer file into its final destination. The
+// layer should be validated before commencing the write.
+func (luc *layerUploadController) writeLayer(fp layerFile, size int64, digest Digest) error {
+ blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{
+ alg: digest.Algorithm(),
+ digest: digest.Hex(),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // Check for existence
+ if _, err := luc.layerStore.driver.CurrentSize(blobPath); err != nil {
+ // TODO(stevvooe): This check is kind of problematic and very racy.
+ switch err := err.(type) {
+ case storagedriver.PathNotFoundError:
+ break // ensure that it doesn't exist.
+ default:
+ // TODO(stevvooe): This isn't actually an error: the blob store is
+ // content addressable and we should just use this to ensure we
+ // have it written. Although, we do need to verify that the
+ // content that is there is the correct length.
+ return err
+ }
+ }
+
+ // Seek our local layer file back now.
+ if _, err := fp.Seek(0, os.SEEK_SET); err != nil {
+ // Cleanup?
+ return err
+ }
+
+ // Okay: we can write the file to the blob store.
+ if err := luc.layerStore.driver.WriteStream(blobPath, 0, uint64(size), fp); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// linkLayer links a valid, written layer blob into the registry, first
+// linking it under the repository namespace, then adding it to the layer
+// index.
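+//
+// The links themselves are small: the repository link stores the blob digest
+// and the index entry stores a newline-separated list of repository names. As
+// a rough sketch (illustrative only; the real layout comes from pathMapper):
+//
+//	.../repositories/<name>/.../<tarsum>/link -> "sha256:<hex>"
+//	.../layerindex/.../<tarsum> -> "name1\nname2"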
+func (luc *layerUploadController) linkLayer(digest Digest) error {
+ layerLinkPath, err := luc.layerStore.pathMapper.path(layerLinkPathSpec{
+ name: luc.Name(),
+ tarSum: luc.TarSum(),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if err := luc.layerStore.driver.PutContent(layerLinkPath, []byte(digest)); err != nil {
+ return err
+ }
+
+ // Link the layer into the name index.
+ layerIndexLinkPath, err := luc.layerStore.pathMapper.path(layerIndexLinkPathSpec{
+ tarSum: luc.TarSum(),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // Read back the name index file. If it doesn't exist, create it with the
+ // current name. Otherwise, add the new repo to the name list.
+
+ // TODO(stevvooe): This is very racy, as well. Reconsider using list for
+ // this operation?
+ layerIndexLinkContent, err := luc.layerStore.driver.GetContent(layerIndexLinkPath)
+ if err != nil {
+ switch err := err.(type) {
+ case storagedriver.PathNotFoundError:
+ layerIndexLinkContent = []byte(luc.Name())
+ default:
+ return err
+ }
+ }
+ layerIndexLinkContent = luc.maybeAddNameToLayerIndexLinkContent(layerIndexLinkContent)
+
+ // Write the index content back to the index.
+ return luc.layerStore.driver.PutContent(layerIndexLinkPath, layerIndexLinkContent)
+}
+
+func (luc *layerUploadController) maybeAddNameToLayerIndexLinkContent(content []byte) []byte {
+ names := strings.Split(string(content), "\n")
+ var found bool
+ // Search the names and find ours
+ for _, name := range names {
+ if name == luc.Name() {
+ found = true
+ }
+ }
+
+ if !found {
+ names = append(names, luc.Name())
+ }
+
+ sort.Strings(names)
+
+ return []byte(strings.Join(names, "\n"))
+}
+
+// localFSLayerUploadStore implements a local layerUploadStore. There are some
+// complexities around hashsums that make round tripping to the storage
+// backend problematic, so we'll store and read locally for now. By GO-beta,
+// this should be fully implemented on top of the backend storagedriver.
+//
+// For now, the directory layout is as follows:
+//
+// <tmpdir>/registry-layer-upload/
+// <uuid>/
+// -> state.json
+// -> data
+//
+// Each upload, identified by uuid, has its own directory with a state file
+// and a data file. The state file has a json representation of the current
+// state. The data file is the in-progress upload data.
+type localFSLayerUploadStore struct {
+ root string
+}
+
+func newTemporaryLocalFSLayerUploadStore() (layerUploadStore, error) {
+ path, err := ioutil.TempDir("", "registry-layer-upload")
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &localFSLayerUploadStore{
+ root: path,
+ }, nil
+}
+
+func (llufs *localFSLayerUploadStore) New(name, tarSum string) (LayerUploadState, error) {
+ lus := LayerUploadState{
+ Name: name,
+ TarSum: tarSum,
+ UUID: uuid.New(),
+ }
+
+ if err := os.Mkdir(llufs.path(lus.UUID, ""), 0755); err != nil {
+ return lus, err
+ }
+
+ return lus, nil
+}
+
+func (llufs *localFSLayerUploadStore) Open(uuid string) (layerFile, error) {
+ fp, err := os.OpenFile(llufs.path(uuid, "data"), os.O_CREATE|os.O_APPEND|os.O_RDWR, 0644)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return fp, nil
+}
+
+func (llufs *localFSLayerUploadStore) GetState(uuid string) (LayerUploadState, error) {
+ // TODO(stevvoe): Storing this state on the local file system is an
+ // intermediate stop-gap. This technique is unlikely to handle any kind of
+ // concurrency very well.
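+ //
+ // A sketch of one possible hardening (not implemented here): guard each
+ // upload directory with an exclusive lock file so concurrent writers
+ // fail fast instead of corrupting state:
+ //
+ //	lock, err := os.OpenFile(llufs.path(uuid, "lock"), os.O_CREATE|os.O_EXCL, 0644)
+ //	if err != nil {
+ //		return lus, fmt.Errorf("upload %s is busy: %v", uuid, err)
+ //	}
+ //	defer os.Remove(lock.Name())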
+ + var lus LayerUploadState + fp, err := os.Open(llufs.path(uuid, "state.json")) + if err != nil { + if os.IsNotExist(err) { + return lus, ErrLayerUploadUnknown + } + + return lus, err + } + defer fp.Close() + + dec := json.NewDecoder(fp) + if err := dec.Decode(&lus); err != nil { + return lus, err + } + + return lus, nil +} + +func (llufs *localFSLayerUploadStore) SaveState(lus LayerUploadState) error { + p, err := json.Marshal(lus) + if err != nil { + return err + } + + err = ioutil.WriteFile(llufs.path(lus.UUID, "state.json"), p, 0644) + if os.IsNotExist(err) { + return ErrLayerUploadUnknown + } + + return err +} + +func (llufs *localFSLayerUploadStore) DeleteState(uuid string) error { + if err := os.RemoveAll(llufs.path(uuid, "")); err != nil { + if os.IsNotExist(err) { + return ErrLayerUploadUnknown + } + + return err + } + + return nil +} + +func (llufs *localFSLayerUploadStore) path(uuid, file string) string { + return filepath.Join(llufs.root, uuid, file) +} diff --git a/storage/services.go b/storage/services.go new file mode 100644 index 00000000..dbe5dc75 --- /dev/null +++ b/storage/services.go @@ -0,0 +1,44 @@ +package storage + +import ( + "github.com/docker/docker-registry/storagedriver" +) + +// Services provides various services with application-level operations for +// use across backend storage drivers. +type Services struct { + driver storagedriver.StorageDriver + pathMapper *pathMapper + layerUploadStore layerUploadStore +} + +// NewServices creates a new Services object to access docker objects stored +// in the underlying driver. +func NewServices(driver storagedriver.StorageDriver) *Services { + + layerUploadStore, err := newTemporaryLocalFSLayerUploadStore() + + if err != nil { + // TODO(stevvooe): This failure needs to be understood in the context + // of the lifecycle of the services object, which is uncertain at this + // point. + panic("unable to allocate layerUploadStore: " + err.Error()) + } + + return &Services{ + driver: driver, + pathMapper: &pathMapper{ + // TODO(sday): This should be configurable. + root: "/docker/registry/", + version: storagePathVersion, + }, + layerUploadStore: layerUploadStore, + } +} + +// Layers returns an instance of the LayerService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (ss *Services) Layers() LayerService { + return &layerStore{driver: ss.driver, pathMapper: ss.pathMapper} +} From a3481c5f1c3e33a4144f2ecba7d29a25694f0bb2 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 18 Nov 2014 17:41:48 -0800 Subject: [PATCH 047/165] Adds ability to unwrap ipc errors into their original type This only works for a specific whitelist of error types, which is currently all errors in the storagedriver package. 
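For illustration (not prescribed by the change itself): a storagedriver.PathNotFoundError returned in the server process is wrapped as a ResponseError with Type "storagedriver.PathNotFoundError" and a Parameters map holding its Path field; on the client, Unwrap rebuilds the typed error from that whitelist, so callers can keep type-switching on driver errors across the IPC boundary.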
Also improves storagedriver tests to enforce proper error types are returned --- storagedriver/filesystem/driver.go | 11 +++++- storagedriver/ipc/client.go | 18 ++++----- storagedriver/ipc/ipc.go | 54 ++++++++++++++++++++++++-- storagedriver/s3/s3.go | 15 ++++--- storagedriver/testsuites/testsuites.go | 8 ++++ 5 files changed, 85 insertions(+), 21 deletions(-) diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go index 46134259..eabb493d 100644 --- a/storagedriver/filesystem/driver.go +++ b/storagedriver/filesystem/driver.go @@ -84,7 +84,7 @@ func (d *Driver) PutContent(subPath string, contents []byte) error { func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { file, err := os.OpenFile(d.subPath(path), os.O_RDONLY, 0644) if err != nil { - return nil, err + return nil, storagedriver.PathNotFoundError{Path: path} } seekPos, err := file.Seek(int64(offset), os.SEEK_SET) @@ -201,7 +201,14 @@ func (d *Driver) List(subPath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *Driver) Move(sourcePath string, destPath string) error { - err := os.Rename(d.subPath(sourcePath), d.subPath(destPath)) + source := d.subPath(sourcePath) + dest := d.subPath(destPath) + + if _, err := os.Stat(source); os.IsNotExist(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + + err := os.Rename(source, dest) return err } diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index 332afe1e..51b02b46 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -126,7 +126,7 @@ func (driver *StorageDriverClient) Start() error { } if response.Error != nil { - return response.Error + return response.Error.Unwrap() } driver.version = response.Version @@ -194,7 +194,7 @@ func (driver *StorageDriverClient) GetContent(path string) ([]byte, error) { } if response.Error != nil { - return nil, response.Error + return nil, response.Error.Unwrap() } defer response.Reader.Close() @@ -226,7 +226,7 @@ func (driver *StorageDriverClient) PutContent(path string, contents []byte) erro } if response.Error != nil { - return response.Error + return response.Error.Unwrap() } return nil @@ -253,7 +253,7 @@ func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.Re } if response.Error != nil { - return nil, response.Error + return nil, response.Error.Unwrap() } return response.Reader, nil @@ -280,7 +280,7 @@ func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, } if response.Error != nil { - return response.Error + return response.Error.Unwrap() } return nil @@ -307,7 +307,7 @@ func (driver *StorageDriverClient) CurrentSize(path string) (uint64, error) { } if response.Error != nil { - return 0, response.Error + return 0, response.Error.Unwrap() } return response.Position, nil @@ -334,7 +334,7 @@ func (driver *StorageDriverClient) List(path string) ([]string, error) { } if response.Error != nil { - return nil, response.Error + return nil, response.Error.Unwrap() } return response.Keys, nil @@ -361,7 +361,7 @@ func (driver *StorageDriverClient) Move(sourcePath string, destPath string) erro } if response.Error != nil { - return response.Error + return response.Error.Unwrap() } return nil @@ -387,7 +387,7 @@ func (driver *StorageDriverClient) Delete(path string) error { } if response.Error != nil { - return response.Error + return response.Error.Unwrap() } return nil diff --git a/storagedriver/ipc/ipc.go 
b/storagedriver/ipc/ipc.go index 898d10bf..182a1af6 100644 --- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -37,9 +37,13 @@ type Request struct { } // ResponseError is a serializable error type. +// The Type and Parameters may be used to reconstruct the same error on the +// client side, falling back to using the Type and Message if this cannot be +// done. type ResponseError struct { - Type string - Message string + Type string + Message string + Parameters map[string]interface{} } // WrapError wraps an error in a serializable struct containing the error's type @@ -48,10 +52,52 @@ func WrapError(err error) *ResponseError { if err == nil { return nil } - return &ResponseError{ - Type: reflect.TypeOf(err).String(), + v := reflect.ValueOf(err) + re := ResponseError{ + Type: v.Type().String(), Message: err.Error(), } + + if v.Kind() == reflect.Struct { + re.Parameters = make(map[string]interface{}) + for i := 0; i < v.NumField(); i++ { + field := v.Type().Field(i) + re.Parameters[field.Name] = v.Field(i).Interface() + } + } + return &re +} + +// Unwrap returns the underlying error if it can be reconstructed, or the +// original ResponseError otherwise. +func (err *ResponseError) Unwrap() error { + var errVal reflect.Value + var zeroVal reflect.Value + + switch err.Type { + case "storagedriver.PathNotFoundError": + errVal = reflect.ValueOf(&storagedriver.PathNotFoundError{}) + case "storagedriver.InvalidOffsetError": + errVal = reflect.ValueOf(&storagedriver.InvalidOffsetError{}) + } + if errVal == zeroVal { + return err + } + + for k, v := range err.Parameters { + fieldVal := errVal.Elem().FieldByName(k) + if fieldVal == zeroVal { + return err + } + fieldVal.Set(reflect.ValueOf(v)) + } + + if unwrapped, ok := errVal.Elem().Interface().(error); ok { + return unwrapped + } + + return err + } func (err *ResponseError) Error() string { diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go index 82071b2e..def03e3e 100644 --- a/storagedriver/s3/s3.go +++ b/storagedriver/s3/s3.go @@ -106,7 +106,11 @@ func New(accessKey string, secretKey string, region aws.Region, encrypt bool, bu // GetContent retrieves the content stored at "path" as a []byte. func (d *Driver) GetContent(path string) ([]byte, error) { - return d.Bucket.Get(path) + content, err := d.Bucket.Get(path) + if err != nil { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return content, nil } // PutContent stores the []byte content at a location designated by "path". 
@@ -121,11 +125,10 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { headers.Add("Range", "bytes="+strconv.FormatUint(offset, 10)+"-") resp, err := d.Bucket.GetResponseWithHeaders(path, headers) - if resp != nil { - return resp.Body, err + if err != nil { + return nil, storagedriver.PathNotFoundError{Path: path} } - - return nil, err + return resp.Body, nil } // WriteStream stores the contents of the provided io.ReadCloser at a location @@ -242,7 +245,7 @@ func (d *Driver) Move(sourcePath string, destPath string) error { s3.CopyOptions{Options: d.getOptions(), MetadataDirective: "", ContentType: d.getContentType()}, d.Bucket.Name+"/"+sourcePath) if err != nil { - return err + return storagedriver.PathNotFoundError{Path: sourcePath} } return d.Delete(sourcePath) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 217237f7..45633d10 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -126,6 +126,7 @@ func (suite *DriverSuite) TestReadNonexistent(c *check.C) { filename := randomString(32) _, err := suite.StorageDriver.GetContent(filename) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestWriteReadStreams1 tests a simple write-read streaming workflow @@ -247,6 +248,7 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomString(32) _, err := suite.StorageDriver.ReadStream(filename, 0) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestList checks the returned list of keys after populating a directory tree @@ -297,6 +299,7 @@ func (suite *DriverSuite) TestMove(c *check.C) { _, err = suite.StorageDriver.GetContent(sourcePath) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestMoveNonexistent checks that moving a nonexistent key fails @@ -306,6 +309,7 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { err := suite.StorageDriver.Move(sourcePath, destPath) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestDelete checks that the delete operation removes data from the storage @@ -324,6 +328,7 @@ func (suite *DriverSuite) TestDelete(c *check.C) { _, err = suite.StorageDriver.GetContent(filename) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestDeleteNonexistent checks that removing a nonexistent key fails @@ -331,6 +336,7 @@ func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { filename := randomString(32) err := suite.StorageDriver.Delete(filename) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestDeleteFolder checks that deleting a folder removes all child elements @@ -354,9 +360,11 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) { _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1)) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2)) c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents, expected []byte) { From 5789ca7629cd147f04ecbe922b5abc602a121d98 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 18 Nov 2014 19:38:14 
-0800 Subject: [PATCH 048/165] Update routes to reflect specification changes To bring the implementation inline with the specification, the names and structure of the API routes have been updated. The overloaded term "image" has been replaced with the term "manifest", which may also be known as "image manifest". The desire for the layer storage to be more of a general blob storage is reflected in moving from "layer" api prefixes to "blob". The "tarsum" path parameter has been replaced by a more general "digest" parameter and is no longer required to start uploads. Another set of changes will come along to support this change at the storage service layer. --- app.go | 6 ++--- app_test.go | 10 +++----- layer.go | 3 ++- layerupload.go | 1 + routes.go | 46 +++++++++++++++++----------------- routes_test.go | 67 ++++++++++++++++++++++++++++---------------------- 6 files changed, 70 insertions(+), 63 deletions(-) diff --git a/app.go b/app.go index 63635250..bc7df554 100644 --- a/app.go +++ b/app.go @@ -29,10 +29,10 @@ func NewApp(configuration configuration.Configuration) *App { // Register the handler dispatchers. app.register(routeNameImageManifest, imageManifestDispatcher) - app.register(routeNameLayer, layerDispatcher) + app.register(routeNameBlob, layerDispatcher) app.register(routeNameTags, tagsDispatcher) - app.register(routeNameLayerUpload, layerUploadDispatcher) - app.register(routeNameLayerUploadResume, layerUploadDispatcher) + app.register(routeNameBlobUpload, layerUploadDispatcher) + app.register(routeNameBlobUploadResume, layerUploadDispatcher) return app } diff --git a/app_test.go b/app_test.go index e0fa727f..bb78044a 100644 --- a/app_test.go +++ b/app_test.go @@ -84,24 +84,22 @@ func TestAppDispatcher(t *testing.T) { }, }, { - endpoint: routeNameLayer, + endpoint: routeNameBlob, vars: []string{ "name", "foo/bar", - "tarsum", "tarsum.v1+bogus:abcdef0123456789", + "digest", "tarsum.v1+bogus:abcdef0123456789", }, }, { - endpoint: routeNameLayerUpload, + endpoint: routeNameBlobUpload, vars: []string{ "name", "foo/bar", - "tarsum", "tarsum.v1+bogus:abcdef0123456789", }, }, { - endpoint: routeNameLayerUploadResume, + endpoint: routeNameBlobUploadResume, vars: []string{ "name", "foo/bar", - "tarsum", "tarsum.v1+bogus:abcdef0123456789", "uuid", "theuuid", }, }, diff --git a/layer.go b/layer.go index 96920a8e..82a1e6d9 100644 --- a/layer.go +++ b/layer.go @@ -16,7 +16,8 @@ func layerDispatcher(ctx *Context, r *http.Request) http.Handler { layerHandler.log = layerHandler.log.WithField("tarsum", layerHandler.TarSum) return handlers.MethodHandler{ - "GET": http.HandlerFunc(layerHandler.GetLayer), + "GET": http.HandlerFunc(layerHandler.GetLayer), + "HEAD": http.HandlerFunc(layerHandler.GetLayer), } } diff --git a/layerupload.go b/layerupload.go index 3eb2ff9a..8916b552 100644 --- a/layerupload.go +++ b/layerupload.go @@ -24,6 +24,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { return handlers.MethodHandler{ "POST": http.HandlerFunc(layerUploadHandler.StartLayerUpload), "GET": http.HandlerFunc(layerUploadHandler.GetUploadStatus), + "HEAD": http.HandlerFunc(layerUploadHandler.GetUploadStatus), "PUT": http.HandlerFunc(layerUploadHandler.PutLayerChunk), "DELETE": http.HandlerFunc(layerUploadHandler.CancelLayerUpload), } diff --git a/routes.go b/routes.go index 8da7c3e2..4aa0097f 100644 --- a/routes.go +++ b/routes.go @@ -6,19 +6,19 @@ import ( ) const ( - routeNameImageManifest = "image-manifest" - routeNameTags = "tags" - routeNameLayer = "layer" - 
routeNameLayerUpload = "layer-upload" - routeNameLayerUploadResume = "layer-upload-resume" + routeNameImageManifest = "image-manifest" + routeNameTags = "tags" + routeNameBlob = "blob" + routeNameBlobUpload = "blob-upload" + routeNameBlobUploadResume = "blob-upload-resume" ) var allEndpoints = []string{ routeNameImageManifest, routeNameTags, - routeNameLayer, - routeNameLayerUpload, - routeNameLayerUploadResume, + routeNameBlob, + routeNameBlobUpload, + routeNameBlobUploadResume, } // v2APIRouter builds a gorilla router with named routes for the various API @@ -27,11 +27,11 @@ func v2APIRouter() *mux.Router { router := mux.NewRouter(). StrictSlash(true) - // GET /v2//image/ Image Manifest Fetch the image manifest identified by name and tag. - // PUT /v2//image/ Image Manifest Upload the image manifest identified by name and tag. - // DELETE /v2//image/ Image Manifest Delete the image identified by name and tag. + // GET /v2//manifest/ Image Manifest Fetch the image manifest identified by name and tag. + // PUT /v2//manifest/ Image Manifest Upload the image manifest identified by name and tag. + // DELETE /v2//manifest/ Image Manifest Delete the image identified by name and tag. router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/image/{tag:" + common.TagNameRegexp.String() + "}"). + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/manifest/{tag:" + common.TagNameRegexp.String() + "}"). Name(routeNameImageManifest) // GET /v2//tags/list Tags Fetch the tags under the repository identified by name. @@ -39,22 +39,22 @@ func v2APIRouter() *mux.Router { Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/tags/list"). Name(routeNameTags) - // GET /v2//layer/ Layer Fetch the layer identified by tarsum. + // GET /v2//blob/ Layer Fetch the blob identified by digest. router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/layer/{tarsum:" + common.TarsumRegexp.String() + "}"). - Name(routeNameLayer) + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blob/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}"). + Name(routeNameBlob) - // POST /v2//layer//upload/ Layer Upload Initiate an upload of the layer identified by tarsum. Requires length and a checksum parameter. + // POST /v2//blob/upload/ Layer Upload Initiate an upload of the layer identified by tarsum. router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/layer/{tarsum:" + common.TarsumRegexp.String() + "}/upload/"). - Name(routeNameLayerUpload) + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blob/upload/"). + Name(routeNameBlobUpload) - // GET /v2//layer//upload/ Layer Upload Get the status of the upload identified by tarsum and uuid. - // PUT /v2//layer//upload/ Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. - // DELETE /v2//layer//upload/ Layer Upload Cancel the upload identified by layer and uuid + // GET /v2//blob/upload/ Layer Upload Get the status of the upload identified by tarsum and uuid. + // PUT /v2//blob/upload/ Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. + // DELETE /v2//blob/upload/ Layer Upload Cancel the upload identified by layer and uuid router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/layer/{tarsum:" + common.TarsumRegexp.String() + "}/upload/{uuid}"). - Name(routeNameLayerUploadResume) + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blob/upload/{uuid}"). 
+ Name(routeNameBlobUploadResume) return router } diff --git a/routes_test.go b/routes_test.go index 7ca51ae5..9085d302 100644 --- a/routes_test.go +++ b/routes_test.go @@ -48,7 +48,7 @@ func TestRouter(t *testing.T) { for _, testcase := range []routeTestCase{ { RouteName: routeNameImageManifest, - RequestURI: "/v2/foo/bar/image/tag", + RequestURI: "/v2/foo/bar/manifest/tag", Vars: map[string]string{ "name": "foo/bar", "tag": "tag", @@ -62,68 +62,75 @@ func TestRouter(t *testing.T) { }, }, { - RouteName: routeNameLayer, - RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234", + RouteName: routeNameBlob, + RequestURI: "/v2/foo/bar/blob/tarsum.dev+foo:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", - "tarsum": "tarsum.dev+foo:abcdef0919234", + "digest": "tarsum.dev+foo:abcdef0919234", }, }, { - RouteName: routeNameLayerUpload, - RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234/upload/", + RouteName: routeNameBlob, + RequestURI: "/v2/foo/bar/blob/sha256:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", - "tarsum": "tarsum.dev+foo:abcdef0919234", + "digest": "sha256:abcdef0919234", }, }, { - RouteName: routeNameLayerUploadResume, - RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234/upload/uuid", + RouteName: routeNameBlobUpload, + RequestURI: "/v2/foo/bar/blob/upload/", Vars: map[string]string{ - "name": "foo/bar", - "tarsum": "tarsum.dev+foo:abcdef0919234", - "uuid": "uuid", + "name": "foo/bar", }, }, { - RouteName: routeNameLayerUploadResume, - RequestURI: "/v2/foo/bar/layer/tarsum.dev+foo:abcdef0919234/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + RouteName: routeNameBlobUploadResume, + RequestURI: "/v2/foo/bar/blob/upload/uuid", Vars: map[string]string{ - "name": "foo/bar", - "tarsum": "tarsum.dev+foo:abcdef0919234", - "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + "name": "foo/bar", + "uuid": "uuid", + }, + }, + { + RouteName: routeNameBlobUploadResume, + RequestURI: "/v2/foo/bar/blob/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + }, + }, + { + RouteName: routeNameBlobUploadResume, + RequestURI: "/v2/foo/bar/blob/upload/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + Vars: map[string]string{ + "name": "foo/bar", + "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", }, }, - { // Check ambiguity: ensure we can distinguish between tags for // "foo/bar/image/image" and image for "foo/bar/image" with tag // "tags" RouteName: routeNameImageManifest, - RequestURI: "/v2/foo/bar/image/image/tags", + RequestURI: "/v2/foo/bar/manifest/manifest/tags", Vars: map[string]string{ - "name": "foo/bar/image", + "name": "foo/bar/manifest", "tag": "tags", }, }, { // This case presents an ambiguity between foo/bar with tag="tags" - // and list tags for "foo/bar/image" + // and list tags for "foo/bar/manifest" RouteName: routeNameTags, - RequestURI: "/v2/foo/bar/image/tags/list", + RequestURI: "/v2/foo/bar/manifest/tags/list", Vars: map[string]string{ - "name": "foo/bar/image", + "name": "foo/bar/manifest", }, }, { - RouteName: routeNameLayerUploadResume, - RequestURI: "/v2/foo/../../layer/tarsum.dev+foo:abcdef0919234/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - Vars: map[string]string{ - "name": "foo/bar", - "tarsum": "tarsum.dev+foo:abcdef0919234", - "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - }, + RouteName: routeNameBlobUploadResume, + RequestURI: "/v2/foo/../../layer/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", StatusCode: 
http.StatusNotFound, }, } { From 3cfe9aede51ed09e2763cc08afc53bd5c652704f Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 19 Nov 2014 13:23:01 -0800 Subject: [PATCH 049/165] Move Digest type into discrete package The Digest type will be fairly central for blob and layer management. The type presented in this package provides a number of core features that should enable reliable use within the registry. This commit will be followed by others that convert the storage layer and webapp to use this type as the primary layer/blob CAS identifier. --- digest/digest.go | 145 +++++++++++++++++++++++++++++++++++++++ digest/digest_test.go | 80 +++++++++++++++++++++ digest/doc.go | 52 ++++++++++++++ digest/verifiers.go | 131 +++++++++++++++++++++++++++++++++++ digest/verifiers_test.go | 71 +++++++++++++++++++ 5 files changed, 479 insertions(+) create mode 100644 digest/digest.go create mode 100644 digest/digest_test.go create mode 100644 digest/doc.go create mode 100644 digest/verifiers.go create mode 100644 digest/verifiers_test.go diff --git a/digest/digest.go b/digest/digest.go new file mode 100644 index 00000000..f2ce021a --- /dev/null +++ b/digest/digest.go @@ -0,0 +1,145 @@ +package digest + +import ( + "bytes" + "crypto/sha256" + "fmt" + "hash" + "io" + "io/ioutil" + "strings" + + "github.com/docker/docker-registry/common" + "github.com/docker/docker/pkg/tarsum" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// More important for this code base, this type is compatible with tarsum +// digests. For example, the following would be a valid Digest: +// +// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg string, h hash.Hash) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) +} + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported by registry. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// ParseDigest parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func ParseDigest(s string) (Digest, error) { + // Common case will be tarsum + _, err := common.ParseTarSum(s) + if err == nil { + return Digest(s), nil + } + + // Continue on for general parser + + i := strings.Index(s, ":") + if i < 0 { + return "", ErrDigestInvalidFormat + } + + // case: "sha256:" with no hex. + if i+1 == len(s) { + return "", ErrDigestInvalidFormat + } + + switch s[:i] { + case "md5", "sha1", "sha256": + break + default: + return "", ErrDigestUnsupported + } + + return Digest(s), nil +} + +// DigestReader returns the most valid digest for the underlying content. 
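+// If rd contains a valid tar stream, the returned digest is a tarsum (e.g.
+// tarsum.v1+sha256:...); otherwise it falls back to a plain sha256 digest of
+// the bytes read. A rough usage sketch (values illustrative):
+//
+//	d, _ := DigestBytes([]byte("hello")) // sha256:2cf24d...
+//	if d.Algorithm() == "sha256" {
+//		// content was not a tar archive
+//	}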
+func DigestReader(rd io.Reader) (Digest, error) { + + // TODO(stevvooe): This is pretty inefficient to always be calculating a + // sha256 hash to provide fallback, but it provides some nice semantics in + // that we never worry about getting the right digest for a given reader. + // For the most part, we can detect tar vs non-tar with only a few bytes, + // so a scheme that saves those bytes would probably be better here. + + h := sha256.New() + tr := io.TeeReader(rd, h) + + ts, err := tarsum.NewTarSum(tr, true, tarsum.Version1) + if err != nil { + return "", err + } + + // Try to copy from the tarsum, if we fail, copy the remaining bytes into + // hash directly. + if _, err := io.Copy(ioutil.Discard, ts); err != nil { + if err.Error() != "archive/tar: invalid tar header" { + return "", err + } + + if _, err := io.Copy(h, rd); err != nil { + return "", err + } + + return NewDigest("sha256", h), nil + } + + d, err := ParseDigest(ts.Sum(nil)) + if err != nil { + return "", err + } + + return d, nil +} + +func DigestBytes(p []byte) (Digest, error) { + return DigestReader(bytes.NewReader(p)) +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() string { + return string(d[:d.sepIndex()]) +} + +// Hex returns the hex digest portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Hex() string { + return string(d[d.sepIndex()+1:]) +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic("invalid digest: " + d) + } + + return i +} diff --git a/digest/digest_test.go b/digest/digest_test.go new file mode 100644 index 00000000..127f7873 --- /dev/null +++ b/digest/digest_test.go @@ -0,0 +1,80 @@ +package digest + +import "testing" + +func TestParseDigest(t *testing.T) { + for _, testcase := range []struct { + input string + err error + algorithm string + hex string + }{ + { + input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "tarsum+sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "tarsum.dev+sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", + algorithm: "tarsum.v1+sha256", + hex: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", + }, + { + input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + algorithm: "sha256", + hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", + }, + { + input: "md5:d41d8cd98f00b204e9800998ecf8427e", + algorithm: "md5", + hex: "d41d8cd98f00b204e9800998ecf8427e", + }, + { + // empty hex + input: "sha256:", + err: ErrDigestInvalidFormat, + }, + { + // just hex + input: "d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestInvalidFormat, + }, + { + input: "foo:d41d8cd98f00b204e9800998ecf8427e", + err: ErrDigestUnsupported, + }, + } { + digest, err := ParseDigest(testcase.input) + if err != testcase.err { + t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) + } + + if testcase.err != nil { + continue + } + + if digest.Algorithm() != testcase.algorithm { + 
t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) + } + + if digest.Hex() != testcase.hex { + t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) + } + + // Parse string return value and check equality + newParsed, err := ParseDigest(digest.String()) + + if err != nil { + t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) + } + + if newParsed != digest { + t.Fatalf("expected equal: %q != %q", newParsed, digest) + } + } +} diff --git a/digest/doc.go b/digest/doc.go new file mode 100644 index 00000000..2ce7698c --- /dev/null +++ b/digest/doc.go @@ -0,0 +1,52 @@ +// This package provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. +// More importantly, it provides tools and wrappers to work with tarsums and +// hash.Hash-based digests with little effort. +// +// Basics +// +// The format of a digest is simply a string with two parts, dubbed the +// "algorithm" and the "digest", separated by a colon: +// +// : +// +// An example of a sha256 digest representation follows: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// In this case, the string "sha256" is the algorithm and the hex bytes are +// the "digest". A tarsum example will be more illustrative of the use case +// involved in the registry: +// +// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b +// +// For this, we consider the algorithm to be "tarsum+sha256". Prudent +// applications will favor the ParseDigest function to verify the format over +// using simple type casts. However, a normal string can be cast as a digest +// with a simple type conversion: +// +// Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b") +// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. +// +package digest diff --git a/digest/verifiers.go b/digest/verifiers.go new file mode 100644 index 00000000..e738026a --- /dev/null +++ b/digest/verifiers.go @@ -0,0 +1,131 @@ +package digest + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "hash" + "io" + "io/ioutil" + + "github.com/docker/docker/pkg/tarsum" +) + +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. 
+ Verified() bool
+
+ // Planned methods:
+ // Err() error
+ // Reset()
+}
+
+func DigestVerifier(d Digest) Verifier {
+ alg := d.Algorithm()
+ switch alg {
+ case "md5", "sha1", "sha256":
+ return hashVerifier{
+ hash: newHash(alg),
+ digest: d,
+ }
+ default:
+ // Assume we have a tarsum.
+ version, err := tarsum.GetVersionFromTarsum(string(d))
+ if err != nil {
+ panic(err) // Always assume valid tarsum at this point.
+ }
+
+ pr, pw := io.Pipe()
+
+ // TODO(stevvooe): We may actually want to ban the earlier versions of
+ // tarsum. That decision may not be the place of the verifier.
+
+ ts, err := tarsum.NewTarSum(pr, true, version)
+ if err != nil {
+ panic(err)
+ }
+
+ // TODO(sday): Ick! A goroutine per digest verification? We'll have to
+ // get the tarsum library to export an io.Writer variant.
+ go func() {
+ io.Copy(ioutil.Discard, ts)
+ pw.Close()
+ }()
+
+ return &tarsumVerifier{
+ digest: d,
+ ts: ts,
+ pr: pr,
+ pw: pw,
+ }
+ }
+
+ panic("unsupported digest: " + d)
+}
+
+// LengthVerifier returns a verifier that returns true when the number of read
+// bytes equals the expected parameter.
+func LengthVerifier(expected int64) Verifier {
+ return &lengthVerifier{
+ expected: expected,
+ }
+}
+
+type lengthVerifier struct {
+ expected int64 // expected bytes read
+ len int64 // bytes read
+}
+
+func (lv *lengthVerifier) Write(p []byte) (n int, err error) {
+ n = len(p)
+ lv.len += int64(n)
+ return n, err
+}
+
+func (lv *lengthVerifier) Verified() bool {
+ return lv.expected == lv.len
+}
+
+func newHash(name string) hash.Hash {
+ switch name {
+ case "sha256":
+ return sha256.New()
+ case "sha1":
+ return sha1.New()
+ case "md5":
+ return md5.New()
+ default:
+ panic("unsupported algorithm: " + name)
+ }
+}
+
+type hashVerifier struct {
+ digest Digest
+ hash hash.Hash
+}
+
+func (hv hashVerifier) Write(p []byte) (n int, err error) {
+ return hv.hash.Write(p)
+}
+
+func (hv hashVerifier) Verified() bool {
+ return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
+}
+
+type tarsumVerifier struct {
+ digest Digest
+ ts tarsum.TarSum
+ pr *io.PipeReader
+ pw *io.PipeWriter
+}
+
+func (tv *tarsumVerifier) Write(p []byte) (n int, err error) {
+ return tv.pw.Write(p)
+}
+
+func (tv *tarsumVerifier) Verified() bool {
+ return tv.digest == Digest(tv.ts.Sum(nil))
+}
diff --git a/digest/verifiers_test.go b/digest/verifiers_test.go
new file mode 100644
index 00000000..77b02ed0
--- /dev/null
+++ b/digest/verifiers_test.go
@@ -0,0 +1,71 @@
+package digest
+
+import (
+ "bytes"
+ "crypto/rand"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/docker/docker-registry/common/testutil"
+)
+
+func TestDigestVerifier(t *testing.T) {
+ p := make([]byte, 1<<20)
+ rand.Read(p)
+ digest, err := DigestBytes(p)
+ if err != nil {
+ t.Fatalf("unexpected error digesting bytes: %#v", err)
+ }
+
+ verifier := DigestVerifier(digest)
+ io.Copy(verifier, bytes.NewReader(p))
+
+ if !verifier.Verified() {
+ t.Fatalf("bytes not verified")
+ }
+
+ tf, tarSum, err := testutil.CreateRandomTarFile()
+ if err != nil {
+ t.Fatalf("error creating tarfile: %v", err)
+ }
+
+ digest, err = DigestReader(tf)
+ if err != nil {
+ t.Fatalf("error digesting tarsum: %v", err)
+ }
+
+ if digest.String() != tarSum {
+ t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum)
+ }
+
+ expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size
+ tf.Seek(0, os.SEEK_SET) // seek back
+
+ // This is the most relevant example for the registry application.
It's + // effectively a read through pipeline, where the final sink is the digest + // verifier. + verifier = DigestVerifier(digest) + lengthVerifier := LengthVerifier(expectedSize) + rd := io.TeeReader(tf, lengthVerifier) + io.Copy(verifier, rd) + + if !lengthVerifier.Verified() { + t.Fatalf("verifier detected incorrect length") + } + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for +// DigestVerifier. We should be tarsum/gzip limited for common cases but we +// want to verify this. +// +// The relevant benchmarks for comparison can be run with the following +// commands: +// +// go test -bench . crypto/sha1 +// go test -bench . github.com/docker/docker/pkg/tarsum +// From 1a508d67d959daa427c46488f74380561c844713 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 19 Nov 2014 14:39:32 -0800 Subject: [PATCH 050/165] Move storage package to use Digest type Mostly, we've made superficial changes to the storage package to start using the Digest type. Many of the exported interface methods have been changed to reflect this in addition to changes in the way layer uploads will be initiated. Further work here is necessary but will come with a separate PR. --- storage/digest.go | 59 --------------- storage/layer.go | 38 ++++------ storage/layer_test.go | 162 ++++++++++------------------------------- storage/layerreader.go | 8 +- storage/layerstore.go | 41 ++++------- storage/layerupload.go | 121 +++++++++++++----------------- storage/paths.go | 26 ++++++- storage/paths_test.go | 10 ++- 8 files changed, 156 insertions(+), 309 deletions(-) delete mode 100644 storage/digest.go diff --git a/storage/digest.go b/storage/digest.go deleted file mode 100644 index db5c884b..00000000 --- a/storage/digest.go +++ /dev/null @@ -1,59 +0,0 @@ -package storage - -import ( - "fmt" - "hash" - "strings" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg string, h hash.Hash) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) -} - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported by registry. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// ParseDigest parses s and returns the validated digest object. An error will -// be returned if the format is invalid. -func ParseDigest(s string) (Digest, error) { - parts := strings.SplitN(s, ":", 2) - if len(parts) != 2 { - return "", ErrDigestInvalidFormat - } - - switch parts[0] { - case "sha256": - break - default: - return "", ErrDigestUnsupported - } - - return Digest(s), nil -} - -// Algorithm returns the algorithm portion of the digest. -func (d Digest) Algorithm() string { - return strings.SplitN(string(d), ":", 2)[0] -} - -// Hex returns the hex digest portion of the digest. 
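// For reference, ParseDigest above was the validating entry point; a short
// sketch of the consumption pattern it implies (the input value is only
// illustrative):
//
//	d, err := ParseDigest("sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
//	switch err {
//	case nil:
//		_ = d.Algorithm() // "sha256"
//	case ErrDigestInvalidFormat, ErrDigestUnsupported:
//		// reject the input string
//	}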
-func (d Digest) Hex() string {
- return strings.SplitN(string(d), ":", 2)[1]
-}
diff --git a/storage/layer.go b/storage/layer.go
index bae69701..6c45f401 100644
--- a/storage/layer.go
+++ b/storage/layer.go
@@ -4,24 +4,25 @@ import (
 "fmt"
 "io"
 "time"
+
+ "github.com/docker/docker-registry/digest"
)

// LayerService provides operations on layer files in a backend storage.
type LayerService interface {
 // Exists returns true if the layer exists.
- Exists(tarSum string) (bool, error)
+ Exists(name string, digest digest.Digest) (bool, error)

 // Fetch the layer identified by TarSum.
- Fetch(tarSum string) (Layer, error)
+ Fetch(name string, digest digest.Digest) (Layer, error)

- // Upload begins a layer upload, returning a handle. If the layer upload
- // is already in progress or the layer has already been uploaded, this
- // will return an error.
- Upload(name, tarSum string) (LayerUpload, error)
+ // Upload begins a layer upload to repository identified by name,
+ // returning a handle.
+ Upload(name string) (LayerUpload, error)

 // Resume continues an in progress layer upload, returning the current
 // state of the upload.
- Resume(name, tarSum, uuid string) (LayerUpload, error)
+ Resume(uuid string) (LayerUpload, error)
}

// Layer provides a readable and seekable layer object. Typically,
@@ -35,8 +36,9 @@ type Layer interface {
 // Name returns the repository under which this layer is linked.
 Name() string
 // TODO(stevvooe): struggling with nomenclature: should this be "repo" or "name"?

- // TarSum returns the unique tarsum of the layer.
- TarSum() string
+ // Digest returns the unique digest of the blob, which is the tarsum for
+ // layers.
+ Digest() digest.Digest

 // CreatedAt returns the time this layer was created. Until we implement
 // Stat call on storagedriver, this just returns the zero time.
@@ -55,18 +57,13 @@ type LayerUpload interface {
 // Name of the repository under which the layer will be linked.
 Name() string

- // TarSum identifier of the proposed layer. Resulting data must match this
- // tarsum.
- TarSum() string
-
 // Offset returns the position of the last byte written to this layer.
 Offset() int64

 // Finish marks the upload as completed, returning a valid handle to the
- // uploaded layer. The final size and checksum are validated against the
- // contents of the uploaded layer. The checksum should be provided in the
- // format <algorithm>:<hex digest>.
- Finish(size int64, digest string) (Layer, error)
+ // uploaded layer. The final size and digest are validated against the
+ // contents of the uploaded layer.
+ Finish(size int64, digest digest.Digest) (Layer, error)

 // Cancel the layer upload process.
 Cancel() error
@@ -85,11 +82,8 @@ var (
 // ErrLayerUploadUnknown returned when upload is not found.
 ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown")

- // ErrLayerInvalidChecksum returned when checksum/digest check fails.
- ErrLayerInvalidChecksum = fmt.Errorf("invalid layer checksum")
-
- // ErrLayerInvalidTarsum returned when tarsum check fails.
- ErrLayerInvalidTarsum = fmt.Errorf("invalid layer tarsum")
+ // ErrLayerInvalidDigest returned when digest check fails.
+ ErrLayerInvalidDigest = fmt.Errorf("invalid layer digest")

 // ErrLayerInvalidLength returned when length check fails.
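// Taken together, the interfaces above imply roughly the following upload
// flow; a hedged sketch that assumes LayerUpload exposes a Write method (the
// tests below copy into it) and that dgst holds the expected tarsum digest:
//
//	upload, err := ls.Upload("library/ubuntu")
//	if err != nil {
//		// handle error
//	}
//	n, _ := io.Copy(upload, rd)
//	layer, err := upload.Finish(n, dgst) // validates size and digest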
ErrLayerInvalidLength = fmt.Errorf("invalid layer length")
diff --git a/storage/layer_test.go b/storage/layer_test.go
index 72187810..335793d2 100644
--- a/storage/layer_test.go
+++ b/storage/layer_test.go
@@ -1,20 +1,16 @@
 package storage

 import (
- "archive/tar"
 "bytes"
- "crypto/rand"
 "crypto/sha256"
 "fmt"
 "io"
 "io/ioutil"
- mrand "math/rand"
 "os"
 "testing"
- "time"
-
- "github.com/docker/docker/pkg/tarsum"
+ "github.com/docker/docker-registry/common/testutil"
+ "github.com/docker/docker-registry/digest"
 "github.com/docker/docker-registry/storagedriver"
 "github.com/docker/docker-registry/storagedriver/inmemory"
)
@@ -22,12 +18,14 @@ import (
 // TestSimpleLayerUpload covers the layer upload process, exercising common
 // error paths that might be seen during an upload.
 func TestSimpleLayerUpload(t *testing.T) {
- randomDataReader, tarSum, err := createRandomReader()
+ randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
 if err != nil {
 t.Fatalf("error creating random reader: %v", err)
 }

+ dgst := digest.Digest(tarSumStr)
+
 uploadStore, err := newTemporaryLocalFSLayerUploadStore()
 if err != nil {
 t.Fatalf("error allocating upload store: %v", err)
@@ -48,7 +46,7 @@ func TestSimpleLayerUpload(t *testing.T) {
 h := sha256.New()
 rd := io.TeeReader(randomDataReader, h)

- layerUpload, err := ls.Upload(imageName, tarSum)
+ layerUpload, err := ls.Upload(imageName)

 if err != nil {
 t.Fatalf("unexpected error starting layer upload: %s", err)
@@ -60,13 +58,13 @@ func TestSimpleLayerUpload(t *testing.T) {
 }

 // Do a resume, get unknown upload
- layerUpload, err = ls.Resume(imageName, tarSum, layerUpload.UUID())
+ layerUpload, err = ls.Resume(layerUpload.UUID())
 if err != ErrLayerUploadUnknown {
 t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
 }

 // Restart!
- layerUpload, err = ls.Upload(imageName, tarSum)
+ layerUpload, err = ls.Upload(imageName)
 if err != nil {
 t.Fatalf("unexpected error starting layer upload: %s", err)
 }
@@ -92,25 +90,25 @@ func TestSimpleLayerUpload(t *testing.T) {
 layerUpload.Close()

 // Do a resume, for good fun
- layerUpload, err = ls.Resume(imageName, tarSum, layerUpload.UUID())
+ layerUpload, err = ls.Resume(layerUpload.UUID())
 if err != nil {
 t.Fatalf("unexpected error resuming upload: %v", err)
 }

- digest := NewDigest("sha256", h)
- layer, err := layerUpload.Finish(randomDataSize, string(digest))
+ sha256Digest := digest.NewDigest("sha256", h)
+ layer, err := layerUpload.Finish(randomDataSize, dgst)

 if err != nil {
 t.Fatalf("unexpected error finishing layer upload: %v", err)
 }

 // After finishing an upload, it should no longer exist.
- if _, err := ls.Resume(imageName, tarSum, layerUpload.UUID()); err != ErrLayerUploadUnknown {
+ if _, err := ls.Resume(layerUpload.UUID()); err != ErrLayerUploadUnknown {
 t.Fatalf("expected layer upload to be unknown, got %v", err)
 }

 // Test for existence.
- exists, err := ls.Exists(layer.TarSum()) + exists, err := ls.Exists(layer.Name(), layer.Digest()) if err != nil { t.Fatalf("unexpected error checking for existence: %v", err) } @@ -129,8 +127,8 @@ func TestSimpleLayerUpload(t *testing.T) { t.Fatalf("incorrect read length") } - if NewDigest("sha256", h) != digest { - t.Fatalf("unexpected digest from uploaded layer: %q != %q", NewDigest("sha256", h), digest) + if digest.NewDigest("sha256", h) != sha256Digest { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) } } @@ -148,13 +146,15 @@ func TestSimpleLayerRead(t *testing.T) { }, } - randomLayerReader, tarSum, err := createRandomReader() + randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random data: %v", err) } + dgst := digest.Digest(tarSumStr) + // Test for existence. - exists, err := ls.Exists(tarSum) + exists, err := ls.Exists(imageName, dgst) if err != nil { t.Fatalf("unexpected error checking for existence: %v", err) } @@ -164,7 +164,7 @@ func TestSimpleLayerRead(t *testing.T) { } // Try to get the layer and make sure we get a not found error - layer, err := ls.Fetch(tarSum) + layer, err := ls.Fetch(imageName, dgst) if err == nil { t.Fatalf("error expected fetching unknown layer") } @@ -174,8 +174,7 @@ func TestSimpleLayerRead(t *testing.T) { } else { err = nil } - - randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, tarSum, randomLayerReader) + randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, dgst, randomLayerReader) if err != nil { t.Fatalf("unexpected error writing test layer: %v", err) } @@ -185,7 +184,7 @@ func TestSimpleLayerRead(t *testing.T) { t.Fatalf("error getting seeker size for random layer: %v", err) } - layer, err = ls.Fetch(tarSum) + layer, err = ls.Fetch(imageName, dgst) if err != nil { t.Fatal(err) } @@ -202,9 +201,9 @@ func TestSimpleLayerRead(t *testing.T) { t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize) } - digest := NewDigest("sha256", h) - if digest != randomLayerDigest { - t.Fatalf("fetched digest does not match: %q != %q", digest, randomLayerDigest) + sha256Digest := digest.NewDigest("sha256", h) + if sha256Digest != randomLayerDigest { + t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest) } // Now seek back the layer, read the whole thing and check against randomLayerData @@ -270,12 +269,14 @@ func TestLayerReadErrors(t *testing.T) { // writeRandomLayer creates a random layer under name and tarSum using driver // and pathMapper. An io.ReadSeeker with the data is returned, along with the // sha256 hex digest. -func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum string, digest Digest, err error) { - reader, tarSum, err := createRandomReader() +func writeRandomLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string) (rs io.ReadSeeker, tarSum digest.Digest, sha256digest digest.Digest, err error) { + reader, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { return nil, "", "", err } + tarSum = digest.Digest(tarSumStr) + // Now, actually create the layer. randomLayerDigest, err := writeTestLayer(driver, pathMapper, name, tarSum, ioutil.NopCloser(reader)) @@ -312,91 +313,10 @@ func seekerSize(seeker io.ReadSeeker) (int64, error) { return end, nil } -// createRandomReader returns a random read seeker and its tarsum. 
The
-// returned content will be a valid tar file with a random number of files and
-// content.
-func createRandomReader() (rs io.ReadSeeker, tarSum string, err error) {
- nFiles := mrand.Intn(10) + 10
- target := &bytes.Buffer{}
- wr := tar.NewWriter(target)
-
- // Perturb this on each iteration of the loop below.
- header := &tar.Header{
- Mode: 0644,
- ModTime: time.Now(),
- Typeflag: tar.TypeReg,
- Uname: "randocalrissian",
- Gname: "cloudcity",
- AccessTime: time.Now(),
- ChangeTime: time.Now(),
- }
-
- for fileNumber := 0; fileNumber < nFiles; fileNumber++ {
- fileSize := mrand.Int63n(1<<20) + 1<<20
-
- header.Name = fmt.Sprint(fileNumber)
- header.Size = fileSize
-
- if err := wr.WriteHeader(header); err != nil {
- return nil, "", err
- }
-
- randomData := make([]byte, fileSize)
-
- // Fill up the buffer with some random data.
- n, err := rand.Read(randomData)
-
- if n != len(randomData) {
- return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
- }
-
- if err != nil {
- return nil, "", err
- }
-
- nn, err := io.Copy(wr, bytes.NewReader(randomData))
- if nn != fileSize {
- return nil, "", fmt.Errorf("short copy writing random file to tar")
- }
-
- if err != nil {
- return nil, "", err
- }
-
- if err := wr.Flush(); err != nil {
- return nil, "", err
- }
- }
-
- if err := wr.Close(); err != nil {
- return nil, "", err
- }
-
- reader := bytes.NewReader(target.Bytes())
-
- // A tar builder that supports tarsum inline calculation would be awesome
- // here.
- ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1)
- if err != nil {
- return nil, "", err
- }
-
- nn, err := io.Copy(ioutil.Discard, ts)
- if nn != int64(len(target.Bytes())) {
- return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes()))
- }
-
- if err != nil {
- return nil, "", err
- }
-
- return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil
-}
-
 // createTestLayer creates a simple test layer in the provided driver under
-// tarsum, returning the string digest. This is implemented peicemeal and
-// should probably be replaced by the uploader when it's ready.
-func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name, tarSum string, content io.Reader) (Digest, error) {
+// tarsum dgst, returning the sha256 digest location. This is implemented
+// piecemeal and should probably be replaced by the uploader when it's ready.
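// For orientation, the deleted helper above computed its tarsum the only way
// the library allows: build the TarSum reader, drain it, then call Sum. A
// condensed sketch, assuming rd is a tar stream:
//
//	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
//	if err != nil {
//		// handle error
//	}
//	io.Copy(ioutil.Discard, ts) // draining drives the checksum
//	sum := ts.Sum(nil)          // e.g. "tarsum.v1+sha256:..."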
+func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) { h := sha256.New() rd := io.TeeReader(content, h) @@ -406,11 +326,11 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, return "", nil } - digest := NewDigest("sha256", h) + blobDigestSHA := digest.NewDigest("sha256", h) blobPath, err := pathMapper.path(blobPathSpec{ - alg: digest.Algorithm(), - digest: digest.Hex(), + alg: blobDigestSHA.Algorithm(), + digest: blobDigestSHA.Hex(), }) if err := driver.PutContent(blobPath, p); err != nil { @@ -418,7 +338,7 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, } layerIndexLinkPath, err := pathMapper.path(layerIndexLinkPathSpec{ - tarSum: tarSum, + digest: dgst, }) if err != nil { @@ -427,18 +347,14 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, layerLinkPath, err := pathMapper.path(layerLinkPathSpec{ name: name, - tarSum: tarSum, + digest: dgst, }) if err != nil { return "", err } - if err != nil { - return "", err - } - - if err := driver.PutContent(layerLinkPath, []byte(string(NewDigest("sha256", h)))); err != nil { + if err := driver.PutContent(layerLinkPath, []byte(blobDigestSHA.String())); err != nil { return "", nil } @@ -446,5 +362,5 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, return "", nil } - return NewDigest("sha256", h), err + return blobDigestSHA, err } diff --git a/storage/layerreader.go b/storage/layerreader.go index df05c367..396940d0 100644 --- a/storage/layerreader.go +++ b/storage/layerreader.go @@ -6,6 +6,8 @@ import ( "io" "os" "time" + + "github.com/docker/docker-registry/digest" ) // layerReadSeeker implements Layer and provides facilities for reading and @@ -16,7 +18,7 @@ type layerReader struct { brd *bufio.Reader name string // repo name of this layer - tarSum string + digest digest.Digest path string createdAt time.Time @@ -35,8 +37,8 @@ func (lrs *layerReader) Name() string { return lrs.name } -func (lrs *layerReader) TarSum() string { - return lrs.tarSum +func (lrs *layerReader) Digest() digest.Digest { + return lrs.digest } func (lrs *layerReader) CreatedAt() time.Time { diff --git a/storage/layerstore.go b/storage/layerstore.go index e2821a83..c9662ffd 100644 --- a/storage/layerstore.go +++ b/storage/layerstore.go @@ -6,6 +6,7 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storagedriver" ) @@ -15,10 +16,10 @@ type layerStore struct { uploadStore layerUploadStore } -func (ls *layerStore) Exists(tarSum string) (bool, error) { +func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) { // Because this implementation just follows blob links, an existence check // is pretty cheap by starting and closing a fetch. - _, err := ls.Fetch(tarSum) + _, err := ls.Fetch(name, digest) if err != nil { if err == ErrLayerUnknown { @@ -31,8 +32,8 @@ func (ls *layerStore) Exists(tarSum string) (bool, error) { return true, nil } -func (ls *layerStore) Fetch(tarSum string) (Layer, error) { - repos, err := ls.resolveContainingRepositories(tarSum) +func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) { + repos, err := ls.resolveContainingRepositories(digest) if err != nil { // TODO(stevvooe): Unknown tarsum error: need to wrap. 
@@ -44,7 +45,7 @@ func (ls *layerStore) Fetch(tarSum string) (Layer, error) {
 // against the list of repos to which we have pull access. The argument
 // repos needs to be filtered against that access list.

- name, blobPath, err := ls.resolveBlobPath(repos, tarSum)
+ _, blobPath, err := ls.resolveBlobPath(repos, digest)

 if err != nil {
 // TODO(stevvooe): Map this error correctly, perhaps in the callee.
@@ -72,7 +73,7 @@ func (ls *layerStore) Fetch(tarSum string) (Layer, error) {
 layerStore: ls,
 path: p,
 name: name,
- tarSum: tarSum,
+ digest: digest,

 // TODO(stevvooe): Storage backend does not support modification time
 // queries yet. Layers "never" change, so just return the zero value.
@@ -88,25 +89,13 @@ func (ls *layerStore) Fetch(tarSum string) (Layer, error) {
 // Upload begins a layer upload, returning a handle. If the layer upload
 // is already in progress or the layer has already been uploaded, this
 // will return an error.
-func (ls *layerStore) Upload(name, tarSum string) (LayerUpload, error) {
- exists, err := ls.Exists(tarSum)
- if err != nil {
- return nil, err
- }
-
- if exists {
- // TODO(stevvoe): This looks simple now, but we really should only
- // return the layer exists error when the layer exists AND the current
- // client has access to the layer. If the client doesn't have access
- // to the layer, the upload should proceed.
- return nil, ErrLayerExists
- }
+func (ls *layerStore) Upload(name string) (LayerUpload, error) {

 // NOTE(stevvooe): Consider the issues with allowing concurrent upload of
 // the same two layers. Should it be disallowed? For now, we allow both
 // parties to proceed and the first one uploads the layer.

- lus, err := ls.uploadStore.New(name, tarSum)
+ lus, err := ls.uploadStore.New(name)
 if err != nil {
 return nil, err
 }
@@ -116,7 +105,7 @@ func (ls *layerStore) Upload(name, tarSum string) (LayerUpload, error) {

 // Resume continues an in progress layer upload, returning the current
 // state of the upload.
-func (ls *layerStore) Resume(name, tarSum, uuid string) (LayerUpload, error) {
+func (ls *layerStore) Resume(uuid string) (LayerUpload, error) {
 lus, err := ls.uploadStore.GetState(uuid)

 if err != nil {
@@ -135,9 +124,9 @@ func (ls *layerStore) newLayerUpload(lus LayerUploadState) LayerUpload {
 }
}

-func (ls *layerStore) resolveContainingRepositories(tarSum string) ([]string, error) {
+func (ls *layerStore) resolveContainingRepositories(digest digest.Digest) ([]string, error) {
 // Lookup the layer link in the index by tarsum id.
- layerIndexLinkPath, err := ls.pathMapper.path(layerIndexLinkPathSpec{tarSum: tarSum})
+ layerIndexLinkPath, err := ls.pathMapper.path(layerIndexLinkPathSpec{digest: digest})
 if err != nil {
 return nil, err
 }
@@ -164,10 +153,10 @@ func (ls *layerStore) resolveContainingRepositories(tarSum string) ([]string, er

 // resolveBlobPath looks up the tarSum in the various repos to find the blob
 // link, returning the repo name and blob path spec or an error on failure.
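// The resolution is a two-step indirection over the path specs used in this
// package; a hedged sketch (readLink is a hypothetical helper standing in
// for a driver.GetContent call):
//
//	indexPath, _ := pm.path(layerIndexLinkPathSpec{digest: dgst})
//	for _, repo := range readLink(indexPath) {
//		linkPath, _ := pm.path(layerLinkPathSpec{name: repo, digest: dgst})
//		// the blob digest read from linkPath maps to a blobPathSpec
//	}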
-func (ls *layerStore) resolveBlobPath(repos []string, tarSum string) (name string, bps blobPathSpec, err error) {
+func (ls *layerStore) resolveBlobPath(repos []string, digest digest.Digest) (name string, bps blobPathSpec, err error) {

 for _, repo := range repos {
- pathSpec := layerLinkPathSpec{name: repo, tarSum: tarSum}
+ pathSpec := layerLinkPathSpec{name: repo, digest: digest}
 layerLinkPath, err := ls.pathMapper.path(pathSpec)

 if err != nil {
@@ -199,5 +188,5 @@ func (ls *layerStore) resolveBlobPath(repos []string, tarSum string) (name strin
 // TODO(stevvooe): Map this error to repo not found, but it basically
 // means we exited the loop above without finding a blob link.

- return "", bps, fmt.Errorf("unable to resolve blog id for repos=%v and tarSum=%q", repos, tarSum)
+ return "", bps, fmt.Errorf("unable to resolve blob id for repos=%v and digest=%v", repos, digest)
}
diff --git a/storage/layerupload.go b/storage/layerupload.go
index 7ad32d75..4ad02162 100644
--- a/storage/layerupload.go
+++ b/storage/layerupload.go
@@ -1,7 +1,6 @@
 package storage

 import (
- "crypto/sha256"
 "encoding/json"
 "fmt"
 "io/ioutil"
@@ -12,6 +11,7 @@ import (

 "code.google.com/p/go-uuid/uuid"

+ "github.com/docker/docker-registry/digest"
 "github.com/docker/docker-registry/storagedriver"

 "github.com/docker/docker/pkg/tarsum"
@@ -23,11 +23,6 @@ type LayerUploadState struct {
 // name is the primary repository under which the layer will be linked.
 Name string

- // tarSum identifies the target layer. Provided by the client. If the
- // resulting tarSum does not match this value, an error should be
- // returned.
- TarSum string
-
 // UUID identifies the upload.
 UUID string

@@ -64,7 +59,7 @@ type layerFile interface {
 // uploads. This interface will definitely change and will most likely end up
 // being exported to the app layer. Move the layer.go when it's ready to go.
 type layerUploadStore interface {
- New(name, tarSum string) (LayerUploadState, error)
+ New(name string) (LayerUploadState, error)
 Open(uuid string) (layerFile, error)
 GetState(uuid string) (LayerUploadState, error)
 SaveState(lus LayerUploadState) error
@@ -78,12 +73,6 @@ func (luc *layerUploadController) Name() string {
 return luc.LayerUploadState.Name
}

-// TarSum identifier of the proposed layer. Resulting data must match this
-// tarsum.
-func (luc *layerUploadController) TarSum() string {
- return luc.LayerUploadState.TarSum
-}
-
// UUID returns the identifier for this upload.
func (luc *layerUploadController) UUID() string {
 return luc.LayerUploadState.UUID
@@ -98,7 +87,7 @@ func (luc *layerUploadController) Offset() int64 {
 // uploaded layer. The final size and checksum are validated against the
 // contents of the uploaded layer. The checksum should be provided in the
 // format <algorithm>:<hex digest>.
-func (luc *layerUploadController) Finish(size int64, digestStr string) (Layer, error) {
+func (luc *layerUploadController) Finish(size int64, digest digest.Digest) (Layer, error) {

 // This section is going to be pretty ugly now. We will have to read the
 // file twice. First, to get the tarsum and checksum. When those are
@@ -115,16 +104,11 @@ func (luc *layerUploadController) Finish(size int64, digestStr string) (Layer, e
 return nil, err
 }

- digest, err := ParseDigest(digestStr)
+ digest, err = luc.validateLayer(fp, size, digest)
 if err != nil {
 return nil, err
 }

- if err := luc.validateLayer(fp, size, digest); err != nil {
- // Cleanup?
- return nil, err
- }
-
 if err := luc.writeLayer(fp, size, digest); err != nil {
 // Cleanup?
return nil, err
 }
@@ -142,7 +126,7 @@ func (luc *layerUploadController) Finish(size int64, digestStr string) (Layer, e
 return nil, err
 }

- return luc.layerStore.Fetch(luc.TarSum())
+ return luc.layerStore.Fetch(luc.Name(), digest)
}

// Cancel the layer upload process.
@@ -239,69 +223,69 @@ func (luc *layerUploadController) reset() {
}

// validateLayer runs several checks on the layer file to ensure its validity.
-// This is currently very expensive and relies on fast io and fast seek.
-func (luc *layerUploadController) validateLayer(fp layerFile, size int64, digest Digest) error {
+// This is currently very expensive and relies on fast io and fast seek on the
+// local host. If successful, the latest digest is returned, which should be
+// used over the passed in value.
+func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst digest.Digest) (digest.Digest, error) {
+ // First, check the incoming tarsum version of the digest.
+ version, err := tarsum.GetVersionFromTarsum(dgst.String())
+ if err != nil {
+ return "", err
+ }
+
+ // TODO(stevvooe): Should we push this down into the digest type?
+ switch version {
+ case tarsum.Version1:
+ default:
+ // version 0 and dev, for now.
+ return "", ErrLayerTarSumVersionUnsupported
+ }
+
+ digestVerifier := digest.DigestVerifier(dgst)
+ lengthVerifier := digest.LengthVerifier(size)
+
 // First, seek to the end of the file, checking the size is as expected.
 end, err := fp.Seek(0, os.SEEK_END)
 if err != nil {
- return err
+ return "", err
 }

 if end != size {
- return ErrLayerInvalidLength
+ // Fast path length check.
+ return "", ErrLayerInvalidLength
 }

- // Now seek back to start and take care of tarsum and checksum.
+ // Now seek back to start and take care of the digest.
 if _, err := fp.Seek(0, os.SEEK_SET); err != nil {
- return err
+ return "", err
 }

- version, err := tarsum.GetVersionFromTarsum(luc.TarSum())
+ tr := io.TeeReader(fp, lengthVerifier)
+ tr = io.TeeReader(tr, digestVerifier)
+
+ // TODO(stevvooe): This is one of the places we need a Digester write
+ // sink. Instead, it's read driven. This might be okay.
+
+ // Calculate an updated digest with the latest version.
+ dgst, err = digest.DigestReader(tr)
 if err != nil {
- return ErrLayerTarSumVersionUnsupported
+ return "", err
 }

- // // We only support tarsum version 1 for now.
- if version != tarsum.Version1 {
- return ErrLayerTarSumVersionUnsupported
+ if !lengthVerifier.Verified() {
+ return "", ErrLayerInvalidLength
 }

- ts, err := tarsum.NewTarSum(fp, true, tarsum.Version1)
- if err != nil {
- return err
+ if !digestVerifier.Verified() {
+ return "", ErrLayerInvalidDigest
 }

- h := sha256.New()
-
- // Pull the layer file through by writing it to a checksum.
- nn, err := io.Copy(h, ts)
-
- if nn != int64(size) {
- return fmt.Errorf("bad read while finishing upload(%s) %v: %v != %v, err=%v", luc.UUID(), fp, nn, size, err)
- }
-
- if err != nil && err != io.EOF {
- return err
- }
-
- calculatedDigest := NewDigest("sha256", h)
-
- // Compare the digests!
- if digest != calculatedDigest {
- return ErrLayerInvalidChecksum
- }
-
- // Compare the tarsums!
- if ts.Sum(nil) != luc.TarSum() {
- return ErrLayerInvalidTarsum
- }
-
- return nil
+ return dgst, nil
}

// writeLayer actually writes the layer file into its final destination.
// The layer should be validated before commencing the write.
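// The validation above is a read-driven tee; the same pipeline in isolation,
// assuming fp is an io.ReadSeeker positioned at the start (these digest
// helpers are renamed in a later patch in this series):
//
//	digestVerifier := digest.DigestVerifier(dgst)
//	lengthVerifier := digest.LengthVerifier(size)
//	tr := io.TeeReader(io.TeeReader(fp, lengthVerifier), digestVerifier)
//	canonical, err := digest.DigestReader(tr) // re-digests at tarsum.Version1
//	ok := err == nil && lengthVerifier.Verified() && digestVerifier.Verified()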
-func (luc *layerUploadController) writeLayer(fp layerFile, size int64, digest Digest) error {
+func (luc *layerUploadController) writeLayer(fp layerFile, size int64, digest digest.Digest) error {
 blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{
 alg: digest.Algorithm(),
 digest: digest.Hex(),
@@ -342,10 +326,10 @@ func (luc *layerUploadController) writeLayer(fp layerFile, size int64, digest Di

// linkLayer links a valid, written layer blob into the registry, first
// linking the repository namespace, then adding it to the layerindex.
-func (luc *layerUploadController) linkLayer(digest Digest) error {
+func (luc *layerUploadController) linkLayer(digest digest.Digest) error {
 layerLinkPath, err := luc.layerStore.pathMapper.path(layerLinkPathSpec{
 name: luc.Name(),
- tarSum: luc.TarSum(),
+ digest: digest,
 })

 if err != nil {
@@ -358,7 +342,7 @@ func (luc *layerUploadController) linkLayer(digest Digest) error {

 // Link the layer into the name index.
 layerIndexLinkPath, err := luc.layerStore.pathMapper.path(layerIndexLinkPathSpec{
- tarSum: luc.TarSum(),
+ digest: digest,
 })

 if err != nil {
@@ -435,11 +419,10 @@ func newTemporaryLocalFSLayerUploadStore() (layerUploadStore, error) {
 }, nil
}

-func (llufs *localFSLayerUploadStore) New(name, tarSum string) (LayerUploadState, error) {
+func (llufs *localFSLayerUploadStore) New(name string) (LayerUploadState, error) {
 lus := LayerUploadState{
- Name: name,
- TarSum: tarSum,
- UUID: uuid.New(),
+ Name: name,
+ UUID: uuid.New(),
 }

 if err := os.Mkdir(llufs.path(lus.UUID, ""), 0755); err != nil {
diff --git a/storage/paths.go b/storage/paths.go
index 76991c1f..aedba320 100644
--- a/storage/paths.go
+++ b/storage/paths.go
@@ -1,6 +1,8 @@
 package storage

 import (
+ "strings"
+ "github.com/docker/docker-registry/digest"
 "fmt"
 "path"

@@ -9,6 +11,11 @@ import (

 const storagePathVersion = "v2"

+// TODO(sday): This needs to be changed: all layers for an image will be
+// linked under the repository. Lookup from tarsum to name is not necessary,
+// so we can remove the layer index. For this to properly work, image push
+// must link the image's layers under the repo.
+
// pathMapper maps paths based on "object names" and their ids. The "object
// names" mapped by pathMapper are internal to the storage system.
//
@@ -79,7 +86,12 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 switch v := spec.(type) {
 case layerLinkPathSpec:
- tsi, err := common.ParseTarSum(v.tarSum)
+ if !strings.HasPrefix(v.digest.Algorithm(), "tarsum") {
+ // Only tarsum is supported, for now
+ return "", fmt.Errorf("unsupported content digest: %v", v.digest)
+ }
+
+ tsi, err := common.ParseTarSum(v.digest.String())

 if err != nil {
 // TODO(sday): This will return an InvalidTarSumError from
@@ -93,7 +105,12 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 return p, nil
 case layerIndexLinkPathSpec:
- tsi, err := common.ParseTarSum(v.tarSum)
+ if !strings.HasPrefix(v.digest.Algorithm(), "tarsum") {
+ // Only tarsum is supported, for now
+ return "", fmt.Errorf("unsupported content digest: %v", v.digest)
+ }
+
+ tsi, err := common.ParseTarSum(v.digest.String())

 if err != nil {
 // TODO(sday): This will return an InvalidTarSumError from
@@ -136,7 +153,7 @@ type pathSpec interface {
// sha256 that can be fetched from the blob store.
type layerLinkPathSpec struct {
 name string
- tarSum string
+ digest digest.Digest
}

func (layerLinkPathSpec) pathSpec() {}
@@ -152,7 +169,7 @@ func (layerLinkPathSpec) pathSpec() {}
// library/ubuntu repository.
The storage layer should access the tarsum from // the first repository to which the client has access. type layerIndexLinkPathSpec struct { - tarSum string + digest digest.Digest } func (layerIndexLinkPathSpec) pathSpec() {} @@ -160,6 +177,7 @@ func (layerIndexLinkPathSpec) pathSpec() {} // blobPath contains the path for the registry global blob store. For now, // this contains layer data, exclusively. type blobPathSpec struct { + // TODO(stevvooe): Port this to make better use of Digest type. alg string digest string } diff --git a/storage/paths_test.go b/storage/paths_test.go index 376966c5..5dc4c07c 100644 --- a/storage/paths_test.go +++ b/storage/paths_test.go @@ -1,6 +1,10 @@ package storage -import "testing" +import ( + "testing" + + "github.com/docker/docker-registry/digest" +) func TestPathMapper(t *testing.T) { pm := &pathMapper{ @@ -15,13 +19,13 @@ func TestPathMapper(t *testing.T) { { spec: layerLinkPathSpec{ name: "foo/bar", - tarSum: "tarsum.v1+test:abcdef", + digest: digest.Digest("tarsum.v1+test:abcdef"), }, expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/abcdef", }, { spec: layerIndexLinkPathSpec{ - tarSum: "tarsum.v1+test:abcdef", + digest: digest.Digest("tarsum.v1+test:abcdef"), }, expected: "/pathmapper-test/layerindex/tarsum/v1/test/abcdef", }, From c0fe9d72d1b80bfd68e1f09ab04fa83367dd7666 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 19 Nov 2014 14:59:05 -0800 Subject: [PATCH 051/165] Various adjustments to digest package for govet/golint --- digest/digest.go | 9 +++++---- digest/doc.go | 2 +- digest/verifiers.go | 16 ++++++++++------ digest/verifiers_test.go | 10 +++++----- storage/layerupload.go | 6 +++--- storage/paths.go | 4 ++-- 6 files changed, 26 insertions(+), 21 deletions(-) diff --git a/digest/digest.go b/digest/digest.go index f2ce021a..cbd0ab6b 100644 --- a/digest/digest.go +++ b/digest/digest.go @@ -75,8 +75,8 @@ func ParseDigest(s string) (Digest, error) { return Digest(s), nil } -// DigestReader returns the most valid digest for the underlying content. -func DigestReader(rd io.Reader) (Digest, error) { +// FromReader returns the most valid digest for the underlying content. +func FromReader(rd io.Reader) (Digest, error) { // TODO(stevvooe): This is pretty inefficient to always be calculating a // sha256 hash to provide fallback, but it provides some nice semantics in @@ -114,8 +114,9 @@ func DigestReader(rd io.Reader) (Digest, error) { return d, nil } -func DigestBytes(p []byte) (Digest, error) { - return DigestReader(bytes.NewReader(p)) +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) (Digest, error) { + return FromReader(bytes.NewReader(p)) } // Algorithm returns the algorithm portion of the digest. This will panic if diff --git a/digest/doc.go b/digest/doc.go index 2ce7698c..278c50e0 100644 --- a/digest/doc.go +++ b/digest/doc.go @@ -1,4 +1,4 @@ -// This package provides a generalized type to opaquely represent message +// Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. 
// More importantly, it provides tools and wrappers to work with tarsums and
diff --git a/digest/verifiers.go b/digest/verifiers.go
index e738026a..26b2b2b2 100644
--- a/digest/verifiers.go
+++ b/digest/verifiers.go
@@ -11,6 +11,10 @@ import (
 "github.com/docker/docker/pkg/tarsum"
)

+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it, then
+// check the result with the Verified method.
type Verifier interface {
 io.Writer

@@ -23,7 +27,9 @@ type Verifier interface {
 // Reset()
}

-func DigestVerifier(d Digest) Verifier {
+// NewDigestVerifier returns a verifier that compares the written bytes
+// against a passed in digest.
+func NewDigestVerifier(d Digest) Verifier {
 alg := d.Algorithm()
 switch alg {
 case "md5", "sha1", "sha256":
@@ -62,13 +68,11 @@ func DigestVerifier(d Digest) Verifier {
 pw: pw,
 }
 }
-
- panic("unsupported digest: " + d)
}

-// LengthVerifier returns a verifier that returns true when the number of read
-// bytes equals the expected parameter.
-func LengthVerifier(expected int64) Verifier {
+// NewLengthVerifier returns a verifier that returns true when the number of
+// read bytes equals the expected parameter.
+func NewLengthVerifier(expected int64) Verifier {
 return &lengthVerifier{
 expected: expected,
 }
diff --git a/digest/verifiers_test.go b/digest/verifiers_test.go
index 77b02ed0..fb176cc1 100644
--- a/digest/verifiers_test.go
+++ b/digest/verifiers_test.go
@@ -13,12 +13,12 @@ import (
func TestDigestVerifier(t *testing.T) {
 p := make([]byte, 1<<20)
 rand.Read(p)
- digest, err := DigestBytes(p)
+ digest, err := FromBytes(p)
 if err != nil {
 t.Fatalf("unexpected error digesting bytes: %#v", err)
 }

- verifier := DigestVerifier(digest)
+ verifier := NewDigestVerifier(digest)
 io.Copy(verifier, bytes.NewReader(p))

 if !verifier.Verified() {
@@ -30,7 +30,7 @@ func TestDigestVerifier(t *testing.T) {
 t.Fatalf("error creating tarfile: %v", err)
 }

- digest, err = DigestReader(tf)
+ digest, err = FromReader(tf)
 if err != nil {
 t.Fatalf("error digesting tarsum: %v", err)
 }
@@ -45,8 +45,8 @@ func TestDigestVerifier(t *testing.T) {
 // This is the most relevant example for the registry application. It's
 // effectively a read through pipeline, where the final sink is the digest
 // verifier.
- verifier = DigestVerifier(digest)
- lengthVerifier := LengthVerifier(expectedSize)
+ verifier = NewDigestVerifier(digest)
+ lengthVerifier := NewLengthVerifier(expectedSize)
 rd := io.TeeReader(tf, lengthVerifier)
 io.Copy(verifier, rd)

diff --git a/storage/layerupload.go b/storage/layerupload.go
index 4ad02162..c07927f1 100644
--- a/storage/layerupload.go
+++ b/storage/layerupload.go
@@ -241,8 +241,8 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d
 return "", ErrLayerTarSumVersionUnsupported
 }

- digestVerifier := digest.DigestVerifier(dgst)
- lengthVerifier := digest.LengthVerifier(size)
+ digestVerifier := digest.NewDigestVerifier(dgst)
+ lengthVerifier := digest.NewLengthVerifier(size)

 // First, seek to the end of the file, checking the size is as expected.
 end, err := fp.Seek(0, os.SEEK_END)
 if err != nil {
@@ -267,7 +267,7 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d
 // sink. Instead, it's read driven. This might be okay.

 // Calculate an updated digest with the latest version.
- dgst, err = digest.DigestReader(tr) + dgst, err = digest.FromReader(tr) if err != nil { return "", err } diff --git a/storage/paths.go b/storage/paths.go index aedba320..18aef17e 100644 --- a/storage/paths.go +++ b/storage/paths.go @@ -1,12 +1,12 @@ package storage import ( - "strings" - "github.com/docker/docker-registry/digest" "fmt" "path" + "strings" "github.com/docker/docker-registry/common" + "github.com/docker/docker-registry/digest" ) const storagePathVersion = "v2" From 56118905b854d6cfb03cee995c0749393e81a63b Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 19 Nov 2014 15:10:23 -0800 Subject: [PATCH 052/165] Include testutil package needed for tar-based tests --- common/testutil/tarfile.go | 95 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 common/testutil/tarfile.go diff --git a/common/testutil/tarfile.go b/common/testutil/tarfile.go new file mode 100644 index 00000000..08b796f5 --- /dev/null +++ b/common/testutil/tarfile.go @@ -0,0 +1,95 @@ +package testutil + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "time" + + "github.com/docker/docker/pkg/tarsum" +) + +// CreateRandomTarFile creates a random tarfile, returning it as an +// io.ReadSeeker along with its tarsum. An error is returned if there is a +// problem generating valid content. +func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) { + nFiles := mrand.Intn(10) + 10 + target := &bytes.Buffer{} + wr := tar.NewWriter(target) + + // Perturb this on each iteration of the loop below. + header := &tar.Header{ + Mode: 0644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Uname: "randocalrissian", + Gname: "cloudcity", + AccessTime: time.Now(), + ChangeTime: time.Now(), + } + + for fileNumber := 0; fileNumber < nFiles; fileNumber++ { + fileSize := mrand.Int63n(1<<20) + 1<<20 + + header.Name = fmt.Sprint(fileNumber) + header.Size = fileSize + + if err := wr.WriteHeader(header); err != nil { + return nil, "", err + } + + randomData := make([]byte, fileSize) + + // Fill up the buffer with some random data. + n, err := rand.Read(randomData) + + if n != len(randomData) { + return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) + } + + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(wr, bytes.NewReader(randomData)) + if nn != fileSize { + return nil, "", fmt.Errorf("short copy writing random file to tar") + } + + if err != nil { + return nil, "", err + } + + if err := wr.Flush(); err != nil { + return nil, "", err + } + } + + if err := wr.Close(); err != nil { + return nil, "", err + } + + reader := bytes.NewReader(target.Bytes()) + + // A tar builder that supports tarsum inline calculation would be awesome + // here. 
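// A sketch of how tests are expected to consume this helper (hedged; the
// digest conversion matches the usage in the storage tests):
//
//	rs, sum, err := testutil.CreateRandomTarFile()
//	if err != nil {
//		// handle error
//	}
//	dgst := digest.Digest(sum) // the returned string is a valid tarsum digest
//	rs.Seek(0, os.SEEK_SET)    // rewind before handing rs to a reader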
+ ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1) + if err != nil { + return nil, "", err + } + + nn, err := io.Copy(ioutil.Discard, ts) + if nn != int64(len(target.Bytes())) { + return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes())) + } + + if err != nil { + return nil, "", err + } + + return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil +} From 1336ced030492ddea4a5cb015e5cba731dd96d6a Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 19 Nov 2014 18:06:54 -0800 Subject: [PATCH 053/165] Updates client to newer routes and changes "layer" to "blob" --- client/client.go | 153 +++++++++++++++++++++++++----------------- client/client_test.go | 136 ++++++++++++++++++------------------- client/pull.go | 2 +- client/push.go | 46 +++++++------ errors.go | 51 ++++++-------- test/test.go | 44 +++++++++--- 6 files changed, 238 insertions(+), 194 deletions(-) diff --git a/client/client.go b/client/client.go index 2ea0e091..8eb156c5 100644 --- a/client/client.go +++ b/client/client.go @@ -16,57 +16,59 @@ import ( // Client implements the client interface to the registry http api type Client interface { // GetImageManifest returns an image manifest for the image at the given - // name, tag pair + // name, tag pair. GetImageManifest(name, tag string) (*registry.ImageManifest, error) // PutImageManifest uploads an image manifest for the image at the given - // name, tag pair + // name, tag pair. PutImageManifest(name, tag string, imageManifest *registry.ImageManifest) error - // DeleteImage removes the image at the given name, tag pair + // DeleteImage removes the image at the given name, tag pair. DeleteImage(name, tag string) error // ListImageTags returns a list of all image tags with the given repository - // name + // name. ListImageTags(name string) ([]string, error) - // GetImageLayer returns the image layer at the given name, tarsum pair in - // the form of an io.ReadCloser with the length of this layer - // A nonzero byteOffset can be provided to receive a partial layer beginning - // at the given offset - GetImageLayer(name, tarsum string, byteOffset int) (io.ReadCloser, int, error) + // BlobLength returns the length of the blob stored at the given name, + // digest pair. + // Returns a length value of -1 on error or if the blob does not exist. + BlobLength(name, digest string) (int, error) - // InitiateLayerUpload starts an image upload for the given name, tarsum - // pair and returns a unique location url to use for other layer upload - // methods - // Returns a *registry.LayerAlreadyExistsError if the layer already exists - // on the registry - InitiateLayerUpload(name, tarsum string) (string, error) + // GetBlob returns the blob stored at the given name, digest pair in the + // form of an io.ReadCloser with the length of this blob. + // A nonzero byteOffset can be provided to receive a partial blob beginning + // at the given offset. + GetBlob(name, digest string, byteOffset int) (io.ReadCloser, int, error) - // GetLayerUploadStatus returns the byte offset and length of the layer at - // the given upload location - GetLayerUploadStatus(location string) (int, int, error) + // InitiateBlobUpload starts a blob upload in the given repository namespace + // and returns a unique location url to use for other blob upload methods. 
+ InitiateBlobUpload(name string) (string, error) - // UploadLayer uploads a full image layer to the registry - UploadLayer(location string, layer io.ReadCloser, length int, checksum *registry.Checksum) error + // GetBlobUploadStatus returns the byte offset and length of the blob at the + // given upload location. + GetBlobUploadStatus(location string) (int, int, error) - // UploadLayerChunk uploads a layer chunk with a given length and startByte - // to the registry - // FinishChunkedLayerUpload must be called to finalize this upload - UploadLayerChunk(location string, layerChunk io.ReadCloser, length, startByte int) error + // UploadBlob uploads a full blob to the registry. + UploadBlob(location string, blob io.ReadCloser, length int, digest string) error - // FinishChunkedLayerUpload completes a chunked layer upload at a given - // location - FinishChunkedLayerUpload(location string, length int, checksum *registry.Checksum) error + // UploadBlobChunk uploads a blob chunk with a given length and startByte to + // the registry. + // FinishChunkedBlobUpload must be called to finalize this upload. + UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error - // CancelLayerUpload deletes all content at the unfinished layer upload - // location and invalidates any future calls to this layer upload - CancelLayerUpload(location string) error + // FinishChunkedBlobUpload completes a chunked blob upload at a given + // location. + FinishChunkedBlobUpload(location string, length int, digest string) error + + // CancelBlobUpload deletes all content at the unfinished blob upload + // location and invalidates any future calls to this blob upload. + CancelBlobUpload(location string) error } // New returns a new Client which operates against a registry with the // given base endpoint -// This endpoint should not include /v2/ or any part of the url after this +// This endpoint should not include /v2/ or any part of the url after this. 
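// An end-to-end sketch of the blob calls defined above (hedged; the endpoint,
// repository name, payload p and digest string dgst are placeholders):
//
//	c := New("http://localhost:5000")
//	location, err := c.InitiateBlobUpload("library/ubuntu")
//	if err != nil {
//		// handle error
//	}
//	err = c.UploadBlob(location, ioutil.NopCloser(bytes.NewReader(p)), len(p), dgst)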
func New(endpoint string) Client { return &clientImpl{endpoint} } @@ -220,9 +222,41 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) { return tags.Tags, nil } -func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.ReadCloser, int, error) { +func (r *clientImpl) BlobLength(name, digest string) (int, error) { + response, err := http.Head(fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, digest)) + if err != nil { + return -1, err + } + defer response.Body.Close() + + // TODO(bbland): handle other status codes, like 5xx errors + switch { + case response.StatusCode == http.StatusOK: + lengthHeader := response.Header.Get("Content-Length") + length, err := strconv.ParseInt(lengthHeader, 10, 0) + if err != nil { + return -1, err + } + return int(length), nil + case response.StatusCode == http.StatusNotFound: + return -1, nil + case response.StatusCode >= 400 && response.StatusCode < 500: + errors := new(registry.Errors) + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&errors) + if err != nil { + return -1, err + } + return -1, errors + default: + response.Body.Close() + return -1, ®istry.UnexpectedHTTPStatusError{Status: response.Status} + } +} + +func (r *clientImpl) GetBlob(name, digest string, byteOffset int) (io.ReadCloser, int, error) { getRequest, err := http.NewRequest("GET", - fmt.Sprintf("%s/v2/%s/layer/%s", r.Endpoint, name, tarsum), nil) + fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, digest), nil) if err != nil { return nil, 0, err } @@ -233,9 +267,6 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read return nil, 0, err } - if response.StatusCode == http.StatusNotFound { - return nil, 0, ®istry.LayerNotFoundError{Name: name, TarSum: tarsum} - } // TODO(bbland): handle other status codes, like 5xx errors switch { case response.StatusCode == http.StatusOK: @@ -247,7 +278,7 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read return response.Body, int(length), nil case response.StatusCode == http.StatusNotFound: response.Body.Close() - return nil, 0, ®istry.LayerNotFoundError{Name: name, TarSum: tarsum} + return nil, 0, ®istry.BlobNotFoundError{Name: name, Digest: digest} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -262,9 +293,9 @@ func (r *clientImpl) GetImageLayer(name, tarsum string, byteOffset int) (io.Read } } -func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) { +func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { postRequest, err := http.NewRequest("POST", - fmt.Sprintf("%s/v2/%s/layer/%s/upload/", r.Endpoint, name, tarsum), nil) + fmt.Sprintf("%s/v2/%s/blob/upload/", r.Endpoint, name), nil) if err != nil { return "", err } @@ -279,8 +310,8 @@ func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) { switch { case response.StatusCode == http.StatusAccepted: return response.Header.Get("Location"), nil - case response.StatusCode == http.StatusNotModified: - return "", ®istry.LayerAlreadyExistsError{Name: name, TarSum: tarsum} + // case response.StatusCode == http.StatusNotFound: + // return case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -294,7 +325,7 @@ func (r *clientImpl) InitiateLayerUpload(name, tarsum string) (string, error) { } } -func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, 
error) { +func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { response, err := http.Get(fmt.Sprintf("%s%s", r.Endpoint, location)) if err != nil { return 0, 0, err @@ -306,7 +337,7 @@ func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, error) { case response.StatusCode == http.StatusNoContent: return parseRangeHeader(response.Header.Get("Range")) case response.StatusCode == http.StatusNotFound: - return 0, 0, ®istry.LayerUploadNotFoundError{Location: location} + return 0, 0, ®istry.BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -320,18 +351,18 @@ func (r *clientImpl) GetLayerUploadStatus(location string) (int, int, error) { } } -func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length int, checksum *registry.Checksum) error { - defer layer.Close() +func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, digest string) error { + defer blob.Close() putRequest, err := http.NewRequest("PUT", - fmt.Sprintf("%s%s", r.Endpoint, location), layer) + fmt.Sprintf("%s%s", r.Endpoint, location), blob) if err != nil { return err } queryValues := url.Values{} queryValues.Set("length", fmt.Sprint(length)) - queryValues.Set(checksum.HashAlgorithm, checksum.Sum) + queryValues.Set("digest", digest) putRequest.URL.RawQuery = queryValues.Encode() putRequest.Header.Set("Content-Type", "application/octet-stream") @@ -348,7 +379,7 @@ func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length in case response.StatusCode == http.StatusCreated: return nil case response.StatusCode == http.StatusNotFound: - return ®istry.LayerUploadNotFoundError{Location: location} + return ®istry.BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -362,11 +393,11 @@ func (r *clientImpl) UploadLayer(location string, layer io.ReadCloser, length in } } -func (r *clientImpl) UploadLayerChunk(location string, layerChunk io.ReadCloser, length, startByte int) error { - defer layerChunk.Close() +func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error { + defer blobChunk.Close() putRequest, err := http.NewRequest("PUT", - fmt.Sprintf("%s%s", r.Endpoint, location), layerChunk) + fmt.Sprintf("%s%s", r.Endpoint, location), blobChunk) if err != nil { return err } @@ -389,17 +420,17 @@ func (r *clientImpl) UploadLayerChunk(location string, layerChunk io.ReadCloser, case response.StatusCode == http.StatusAccepted: return nil case response.StatusCode == http.StatusRequestedRangeNotSatisfiable: - lastValidRange, layerSize, err := parseRangeHeader(response.Header.Get("Range")) + lastValidRange, blobSize, err := parseRangeHeader(response.Header.Get("Range")) if err != nil { return err } - return ®istry.LayerUploadInvalidRangeError{ + return ®istry.BlobUploadInvalidRangeError{ Location: location, LastValidRange: lastValidRange, - LayerSize: layerSize, + BlobSize: blobSize, } case response.StatusCode == http.StatusNotFound: - return ®istry.LayerUploadNotFoundError{Location: location} + return ®istry.BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -413,7 +444,7 @@ func (r *clientImpl) 
UploadLayerChunk(location string, layerChunk io.ReadCloser, } } -func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, checksum *registry.Checksum) error { +func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, digest string) error { putRequest, err := http.NewRequest("PUT", fmt.Sprintf("%s%s", r.Endpoint, location), nil) if err != nil { @@ -422,7 +453,7 @@ func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, check queryValues := new(url.Values) queryValues.Set("length", fmt.Sprint(length)) - queryValues.Set(checksum.HashAlgorithm, checksum.Sum) + queryValues.Set("digest", digest) putRequest.URL.RawQuery = queryValues.Encode() putRequest.Header.Set("Content-Type", "application/octet-stream") @@ -441,7 +472,7 @@ func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, check case response.StatusCode == http.StatusCreated: return nil case response.StatusCode == http.StatusNotFound: - return ®istry.LayerUploadNotFoundError{Location: location} + return ®istry.BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -455,7 +486,7 @@ func (r *clientImpl) FinishChunkedLayerUpload(location string, length int, check } } -func (r *clientImpl) CancelLayerUpload(location string) error { +func (r *clientImpl) CancelBlobUpload(location string) error { deleteRequest, err := http.NewRequest("DELETE", fmt.Sprintf("%s%s", r.Endpoint, location), nil) if err != nil { @@ -473,7 +504,7 @@ func (r *clientImpl) CancelLayerUpload(location string) error { case response.StatusCode == http.StatusNoContent: return nil case response.StatusCode == http.StatusNotFound: - return ®istry.LayerUploadNotFoundError{Location: location} + return ®istry.BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -490,7 +521,7 @@ func (r *clientImpl) CancelLayerUpload(location string) error { // imageManifestURL is a helper method for returning the full url to an image // manifest func (r *clientImpl) imageManifestURL(name, tag string) string { - return fmt.Sprintf("%s/v2/%s/image/%s", r.Endpoint, name, tag) + return fmt.Sprintf("%s/v2/%s/manifest/%s", r.Endpoint, name, tag) } // parseRangeHeader parses out the offset and length from a returned Range diff --git a/client/client_test.go b/client/client_test.go index e900463a..9840ae44 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -13,85 +13,87 @@ import ( "github.com/docker/docker-registry/test" ) -type testLayer struct { - tarSum string +type testBlob struct { + digest string contents []byte } func TestPush(t *testing.T) { name := "hello/world" tag := "sometag" - testLayers := []testLayer{ + testBlobs := []testBlob{ { - tarSum: "12345", + digest: "12345", contents: []byte("some contents"), }, { - tarSum: "98765", + digest: "98765", contents: []byte("some other contents"), }, } - uploadLocations := make([]string, len(testLayers)) - layers := make([]registry.FSLayer, len(testLayers)) - history := make([]registry.ManifestHistory, len(testLayers)) + uploadLocations := make([]string, len(testBlobs)) + blobs := make([]registry.FSLayer, len(testBlobs)) + history := make([]registry.ManifestHistory, len(testBlobs)) - for i, layer := range testLayers { - uploadLocations[i] = fmt.Sprintf("/v2/%s/layer/%s/upload-location-%d", name, layer.tarSum, i) - 
layers[i] = registry.FSLayer{BlobSum: layer.tarSum} - history[i] = registry.ManifestHistory{V1Compatibility: layer.tarSum} + for i, blob := range testBlobs { + // TODO(bbland): this is returning the same location for all uploads, + // because we can't know which blob will get which location. + // It's sort of okay because we're using unique digests, but this needs + // to change at some point. + uploadLocations[i] = fmt.Sprintf("/v2/%s/blob/test-uuid", name) + blobs[i] = registry.FSLayer{BlobSum: blob.digest} + history[i] = registry.ManifestHistory{V1Compatibility: blob.digest} } manifest := ®istry.ImageManifest{ Name: name, Tag: tag, Architecture: "x86", - FSLayers: layers, + FSLayers: blobs, History: history, SchemaVersion: 1, } manifestBytes, err := json.Marshal(manifest) - layerRequestResponseMappings := make([]test.RequestResponseMapping, 2*len(testLayers)) - for i, layer := range testLayers { - layerRequestResponseMappings[2*i] = test.RequestResponseMapping{ + blobRequestResponseMappings := make([]test.RequestResponseMapping, 2*len(testBlobs)) + for i, blob := range testBlobs { + blobRequestResponseMappings[2*i] = test.RequestResponseMapping{ Request: test.Request{ Method: "POST", - Route: "/v2/" + name + "/layer/" + layer.tarSum + "/upload/", + Route: "/v2/" + name + "/blob/upload/", }, - Responses: []test.Response{ - { - StatusCode: http.StatusAccepted, - Headers: http.Header(map[string][]string{ - "Location": {uploadLocations[i]}, - }), - }, + Response: test.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Location": {uploadLocations[i]}, + }), }, } - layerRequestResponseMappings[2*i+1] = test.RequestResponseMapping{ + blobRequestResponseMappings[2*i+1] = test.RequestResponseMapping{ Request: test.Request{ Method: "PUT", Route: uploadLocations[i], - Body: layer.contents, - }, - Responses: []test.Response{ - { - StatusCode: http.StatusCreated, + QueryParams: map[string][]string{ + "length": {fmt.Sprint(len(blob.contents))}, + "digest": {blob.digest}, }, + Body: blob.contents, + }, + Response: test.Response{ + StatusCode: http.StatusCreated, }, } } - handler := test.NewHandler(append(layerRequestResponseMappings, test.RequestResponseMap{ + handler := test.NewHandler(append(blobRequestResponseMappings, test.RequestResponseMap{ test.RequestResponseMapping{ Request: test.Request{ Method: "PUT", - Route: "/v2/" + name + "/image/" + tag, + Route: "/v2/" + name + "/manifest/" + tag, Body: manifestBytes, }, - Responses: []test.Response{ - { - StatusCode: http.StatusOK, - }, + Response: test.Response{ + StatusCode: http.StatusOK, }, }, }...)) @@ -103,8 +105,8 @@ func TestPush(t *testing.T) { layerStorage: make(map[string]Layer), } - for _, layer := range testLayers { - l, err := objectStore.Layer(layer.tarSum) + for _, blob := range testBlobs { + l, err := objectStore.Layer(blob.digest) if err != nil { t.Fatal(err) } @@ -114,7 +116,7 @@ func TestPush(t *testing.T) { t.Fatal(err) } - writer.Write(layer.contents) + writer.Write(blob.contents) writer.Close() } @@ -129,61 +131,57 @@ func TestPush(t *testing.T) { func TestPull(t *testing.T) { name := "hello/world" tag := "sometag" - testLayers := []testLayer{ + testBlobs := []testBlob{ { - tarSum: "12345", + digest: "12345", contents: []byte("some contents"), }, { - tarSum: "98765", + digest: "98765", contents: []byte("some other contents"), }, } - layers := make([]registry.FSLayer, len(testLayers)) - history := make([]registry.ManifestHistory, len(testLayers)) + blobs := make([]registry.FSLayer, 
len(testBlobs)) + history := make([]registry.ManifestHistory, len(testBlobs)) - for i, layer := range testLayers { - layers[i] = registry.FSLayer{BlobSum: layer.tarSum} - history[i] = registry.ManifestHistory{V1Compatibility: layer.tarSum} + for i, blob := range testBlobs { + blobs[i] = registry.FSLayer{BlobSum: blob.digest} + history[i] = registry.ManifestHistory{V1Compatibility: blob.digest} } manifest := ®istry.ImageManifest{ Name: name, Tag: tag, Architecture: "x86", - FSLayers: layers, + FSLayers: blobs, History: history, SchemaVersion: 1, } manifestBytes, err := json.Marshal(manifest) - layerRequestResponseMappings := make([]test.RequestResponseMapping, len(testLayers)) - for i, layer := range testLayers { - layerRequestResponseMappings[i] = test.RequestResponseMapping{ + blobRequestResponseMappings := make([]test.RequestResponseMapping, len(testBlobs)) + for i, blob := range testBlobs { + blobRequestResponseMappings[i] = test.RequestResponseMapping{ Request: test.Request{ Method: "GET", - Route: "/v2/" + name + "/layer/" + layer.tarSum, + Route: "/v2/" + name + "/blob/" + blob.digest, }, - Responses: []test.Response{ - { - StatusCode: http.StatusOK, - Body: layer.contents, - }, + Response: test.Response{ + StatusCode: http.StatusOK, + Body: blob.contents, }, } } - handler := test.NewHandler(append(layerRequestResponseMappings, test.RequestResponseMap{ + handler := test.NewHandler(append(blobRequestResponseMappings, test.RequestResponseMap{ test.RequestResponseMapping{ Request: test.Request{ Method: "GET", - Route: "/v2/" + name + "/image/" + tag, + Route: "/v2/" + name + "/manifest/" + tag, }, - Responses: []test.Response{ - { - StatusCode: http.StatusOK, - Body: manifestBytes, - }, + Response: test.Response{ + StatusCode: http.StatusOK, + Body: manifestBytes, }, }, }...)) @@ -214,8 +212,8 @@ func TestPull(t *testing.T) { t.Fatal("Incorrect manifest") } - for _, layer := range testLayers { - l, err := objectStore.Layer(layer.tarSum) + for _, blob := range testBlobs { + l, err := objectStore.Layer(blob.digest) if err != nil { t.Fatal(err) } @@ -226,13 +224,13 @@ func TestPull(t *testing.T) { } defer reader.Close() - layerBytes, err := ioutil.ReadAll(reader) + blobBytes, err := ioutil.ReadAll(reader) if err != nil { t.Fatal(err) } - if string(layerBytes) != string(layer.contents) { - t.Fatal("Incorrect layer") + if string(blobBytes) != string(blob.contents) { + t.Fatal("Incorrect blob") } } } diff --git a/client/pull.go b/client/pull.go index 75cc9af1..825b0c06 100644 --- a/client/pull.go +++ b/client/pull.go @@ -99,7 +99,7 @@ func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. } defer writer.Close() - layerReader, length, err := c.GetImageLayer(name, fsLayer.BlobSum, 0) + layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, 0) if err != nil { log.WithFields(log.Fields{ "error": err, diff --git a/client/push.go b/client/push.go index a1fb0e23..91bd9af6 100644 --- a/client/push.go +++ b/client/push.go @@ -2,7 +2,6 @@ package client import ( "bytes" - "crypto/sha1" "io" "io/ioutil" @@ -89,25 +88,10 @@ func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. 
}).Warn("Unable to read local layer") return err } - - location, err := c.InitiateLayerUpload(name, fsLayer.BlobSum) - if _, ok := err.(*registry.LayerAlreadyExistsError); ok { - log.WithField("layer", fsLayer).Info("Layer already exists") - return nil - } - if err != nil { - log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to upload layer") - return err - } + defer layerReader.Close() layerBuffer := new(bytes.Buffer) - checksum := sha1.New() - teeReader := io.TeeReader(layerReader, checksum) - - _, err = io.Copy(layerBuffer, teeReader) + layerSize, err := io.Copy(layerBuffer, layerReader) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -116,9 +100,29 @@ func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. return err } - err = c.UploadLayer(location, ioutil.NopCloser(layerBuffer), layerBuffer.Len(), - ®istry.Checksum{HashAlgorithm: "sha1", Sum: string(checksum.Sum(nil))}, - ) + length, err := c.BlobLength(name, fsLayer.BlobSum) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to check existence of remote layer") + return err + } + if length >= 0 { + log.WithField("layer", fsLayer).Info("Layer already exists") + return nil + } + + location, err := c.InitiateBlobUpload(name) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "layer": fsLayer, + }).Warn("Unable to upload layer") + return err + } + + err = c.UploadBlob(location, ioutil.NopCloser(layerBuffer), int(layerSize), fsLayer.BlobSum) if err != nil { log.WithFields(log.Fields{ "error": err, diff --git a/errors.go b/errors.go index abec4965..bb59a5af 100644 --- a/errors.go +++ b/errors.go @@ -224,57 +224,44 @@ func (e *ImageManifestNotFoundError) Error() string { e.Name, e.Tag) } -// LayerAlreadyExistsError is returned when attempting to create a new layer -// that already exists in the registry. -type LayerAlreadyExistsError struct { - Name string - TarSum string -} - -func (e *LayerAlreadyExistsError) Error() string { - return fmt.Sprintf("Layer already found with Name: %s, TarSum: %s", - e.Name, e.TarSum) -} - -// LayerNotFoundError is returned when making an operation against a given image +// BlobNotFoundError is returned when making an operation against a given image // layer that does not exist in the registry. -type LayerNotFoundError struct { +type BlobNotFoundError struct { Name string - TarSum string + Digest string } -func (e *LayerNotFoundError) Error() string { - return fmt.Sprintf("No layer found with Name: %s, TarSum: %s", - e.Name, e.TarSum) +func (e *BlobNotFoundError) Error() string { + return fmt.Sprintf("No blob found with Name: %s, Digest: %s", + e.Name, e.Digest) } -// LayerUploadNotFoundError is returned when making a layer upload operation -// against an invalid layer upload location url +// BlobUploadNotFoundError is returned when making a blob upload operation against an +// invalid blob upload location url. // This may be the result of using a cancelled, completed, or stale upload // location. -type LayerUploadNotFoundError struct { +type BlobUploadNotFoundError struct { Location string } -func (e *LayerUploadNotFoundError) Error() string { - return fmt.Sprintf("No layer found upload found at Location: %s", - e.Location) +func (e *BlobUploadNotFoundError) Error() string { + return fmt.Sprintf("No blob upload found at Location: %s", e.Location) } -// LayerUploadInvalidRangeError is returned when attempting to upload an image -// layer chunk that is out of order. 
-// This provides the known LayerSize and LastValidRange which can be used to +// BlobUploadInvalidRangeError is returned when attempting to upload an image +// blob chunk that is out of order. +// This provides the known BlobSize and LastValidRange which can be used to // resume the upload. -type LayerUploadInvalidRangeError struct { +type BlobUploadInvalidRangeError struct { Location string LastValidRange int - LayerSize int + BlobSize int } -func (e *LayerUploadInvalidRangeError) Error() string { +func (e *BlobUploadInvalidRangeError) Error() string { return fmt.Sprintf( - "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Layer Size: %d", - e.Location, e.LastValidRange, e.LayerSize) + "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", + e.Location, e.LastValidRange, e.BlobSize) } // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is diff --git a/test/test.go b/test/test.go index 71588819..24a08f75 100644 --- a/test/test.go +++ b/test/test.go @@ -6,16 +6,18 @@ import ( "io" "io/ioutil" "net/http" + "sort" + "strings" ) -// RequestResponseMap is a mapping from Requests to Responses +// RequestResponseMap is an ordered mapping from Requests to Responses type RequestResponseMap []RequestResponseMapping -// RequestResponseMapping defines an ordered list of Responses to be sent in -// response to a given Request +// RequestResponseMapping defines a Response to be sent in response to a given +// Request type RequestResponseMapping struct { - Request Request - Responses []Response + Request Request + Response Response } // TODO(bbland): add support for request headers @@ -28,12 +30,28 @@ type Request struct { // Route is the http route of this request Route string + // QueryParams are the query parameters of this request + QueryParams map[string][]string + // Body is the byte contents of the http request Body []byte } func (r Request) String() string { - return fmt.Sprintf("%s %s\n%s", r.Method, r.Route, r.Body) + queryString := "" + if len(r.QueryParams) > 0 { + queryString = "?" 
+ keys := make([]string, 0, len(r.QueryParams)) + for k := range r.QueryParams { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + queryString += strings.Join(r.QueryParams[k], "&") + "&" + } + queryString = queryString[:len(queryString)-1] + } + return fmt.Sprintf("%s %s%s\n%s", r.Method, r.Route, queryString, r.Body) } // Response is a simplified http.Response object @@ -61,7 +79,12 @@ type testHandler struct { func NewHandler(requestResponseMap RequestResponseMap) http.Handler { responseMap := make(map[string][]Response) for _, mapping := range requestResponseMap { - responseMap[mapping.Request.String()] = mapping.Responses + responses, ok := responseMap[mapping.Request.String()] + if ok { + responseMap[mapping.Request.String()] = append(responses, mapping.Response) + } else { + responseMap[mapping.Request.String()] = []Response{mapping.Response} + } } return &testHandler{responseMap: responseMap} } @@ -71,9 +94,10 @@ func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { requestBody, _ := ioutil.ReadAll(r.Body) request := Request{ - Method: r.Method, - Route: r.URL.Path, - Body: requestBody, + Method: r.Method, + Route: r.URL.Path, + QueryParams: r.URL.Query(), + Body: requestBody, } responses, ok := app.responseMap[request.String()] From 64c8bd29ccecbcdf5f9c6f56eec05915703b5293 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 19 Nov 2014 18:52:09 -0800 Subject: [PATCH 054/165] Uses new digest package instead of string digests Also drops extraneous test package and uses testutil instead --- client/client.go | 27 ++++++----- client/client_test.go | 55 +++++++++++----------- client/objectstore.go | 11 +++-- test/test.go => common/testutil/handler.go | 2 +- errors.go | 4 +- images.go | 3 +- 6 files changed, 54 insertions(+), 48 deletions(-) rename test/test.go => common/testutil/handler.go (99%) diff --git a/client/client.go b/client/client.go index 8eb156c5..944050e0 100644 --- a/client/client.go +++ b/client/client.go @@ -11,6 +11,7 @@ import ( "strconv" "github.com/docker/docker-registry" + "github.com/docker/docker-registry/digest" ) // Client implements the client interface to the registry http api @@ -33,13 +34,13 @@ type Client interface { // BlobLength returns the length of the blob stored at the given name, // digest pair. // Returns a length value of -1 on error or if the blob does not exist. - BlobLength(name, digest string) (int, error) + BlobLength(name string, dgst digest.Digest) (int, error) // GetBlob returns the blob stored at the given name, digest pair in the // form of an io.ReadCloser with the length of this blob. // A nonzero byteOffset can be provided to receive a partial blob beginning // at the given offset. - GetBlob(name, digest string, byteOffset int) (io.ReadCloser, int, error) + GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) // InitiateBlobUpload starts a blob upload in the given repository namespace // and returns a unique location url to use for other blob upload methods. @@ -50,7 +51,7 @@ type Client interface { GetBlobUploadStatus(location string) (int, int, error) // UploadBlob uploads a full blob to the registry. - UploadBlob(location string, blob io.ReadCloser, length int, digest string) error + UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error // UploadBlobChunk uploads a blob chunk with a given length and startByte to // the registry. 
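// Editor's aside (not part of the patch): a minimal sketch of how a caller
// might drive the digest-typed blob API above. The client value c, the
// repository name, and the content are hypothetical; BlobLength reporting -1
// for a missing blob follows its doc comment.
//
//	content := []byte("some contents")
//	dgst, err := digest.FromReader(bytes.NewReader(content))
//	if err != nil {
//		log.Fatal(err)
//	}
//
//	if length, err := c.BlobLength("hello/world", dgst); err == nil && length < 0 {
//		// Blob is absent: start an upload and push it in one shot.
//		location, _ := c.InitiateBlobUpload("hello/world")
//		_ = c.UploadBlob(location, ioutil.NopCloser(bytes.NewReader(content)),
//			len(content), dgst)
//	}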
@@ -59,7 +60,7 @@ type Client interface { // FinishChunkedBlobUpload completes a chunked blob upload at a given // location. - FinishChunkedBlobUpload(location string, length int, digest string) error + FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error // CancelBlobUpload deletes all content at the unfinished blob upload // location and invalidates any future calls to this blob upload. @@ -222,8 +223,8 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) { return tags.Tags, nil } -func (r *clientImpl) BlobLength(name, digest string) (int, error) { - response, err := http.Head(fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, digest)) +func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { + response, err := http.Head(fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, dgst)) if err != nil { return -1, err } @@ -254,9 +255,9 @@ func (r *clientImpl) BlobLength(name, digest string) (int, error) { } } -func (r *clientImpl) GetBlob(name, digest string, byteOffset int) (io.ReadCloser, int, error) { +func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { getRequest, err := http.NewRequest("GET", - fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, digest), nil) + fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, dgst), nil) if err != nil { return nil, 0, err } @@ -278,7 +279,7 @@ func (r *clientImpl) GetBlob(name, digest string, byteOffset int) (io.ReadCloser return response.Body, int(length), nil case response.StatusCode == http.StatusNotFound: response.Body.Close() - return nil, 0, ®istry.BlobNotFoundError{Name: name, Digest: digest} + return nil, 0, ®istry.BlobNotFoundError{Name: name, Digest: dgst} case response.StatusCode >= 400 && response.StatusCode < 500: errors := new(registry.Errors) decoder := json.NewDecoder(response.Body) @@ -351,7 +352,7 @@ func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { } } -func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, digest string) error { +func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error { defer blob.Close() putRequest, err := http.NewRequest("PUT", @@ -362,7 +363,7 @@ func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, queryValues := url.Values{} queryValues.Set("length", fmt.Sprint(length)) - queryValues.Set("digest", digest) + queryValues.Set("digest", dgst.String()) putRequest.URL.RawQuery = queryValues.Encode() putRequest.Header.Set("Content-Type", "application/octet-stream") @@ -444,7 +445,7 @@ func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, l } } -func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, digest string) error { +func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error { putRequest, err := http.NewRequest("PUT", fmt.Sprintf("%s%s", r.Endpoint, location), nil) if err != nil { @@ -453,7 +454,7 @@ func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, digest queryValues := new(url.Values) queryValues.Set("length", fmt.Sprint(length)) - queryValues.Set("digest", digest) + queryValues.Set("digest", dgst.String()) putRequest.URL.RawQuery = queryValues.Encode() putRequest.Header.Set("Content-Type", "application/octet-stream") diff --git a/client/client_test.go b/client/client_test.go index 9840ae44..a77e7665 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ 
-10,11 +10,12 @@ import ( "testing" "github.com/docker/docker-registry" - "github.com/docker/docker-registry/test" + "github.com/docker/docker-registry/common/testutil" + "github.com/docker/docker-registry/digest" ) type testBlob struct { - digest string + digest digest.Digest contents []byte } @@ -42,7 +43,7 @@ func TestPush(t *testing.T) { // to change at some point. uploadLocations[i] = fmt.Sprintf("/v2/%s/blob/test-uuid", name) blobs[i] = registry.FSLayer{BlobSum: blob.digest} - history[i] = registry.ManifestHistory{V1Compatibility: blob.digest} + history[i] = registry.ManifestHistory{V1Compatibility: blob.digest.String()} } manifest := ®istry.ImageManifest{ @@ -55,44 +56,44 @@ func TestPush(t *testing.T) { } manifestBytes, err := json.Marshal(manifest) - blobRequestResponseMappings := make([]test.RequestResponseMapping, 2*len(testBlobs)) + blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) for i, blob := range testBlobs { - blobRequestResponseMappings[2*i] = test.RequestResponseMapping{ - Request: test.Request{ + blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ + Request: testutil.Request{ Method: "POST", Route: "/v2/" + name + "/blob/upload/", }, - Response: test.Response{ + Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Location": {uploadLocations[i]}, }), }, } - blobRequestResponseMappings[2*i+1] = test.RequestResponseMapping{ - Request: test.Request{ + blobRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ + Request: testutil.Request{ Method: "PUT", Route: uploadLocations[i], QueryParams: map[string][]string{ "length": {fmt.Sprint(len(blob.contents))}, - "digest": {blob.digest}, + "digest": {blob.digest.String()}, }, Body: blob.contents, }, - Response: test.Response{ + Response: testutil.Response{ StatusCode: http.StatusCreated, }, } } - handler := test.NewHandler(append(blobRequestResponseMappings, test.RequestResponseMap{ - test.RequestResponseMapping{ - Request: test.Request{ + handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{ + testutil.RequestResponseMapping{ + Request: testutil.Request{ Method: "PUT", Route: "/v2/" + name + "/manifest/" + tag, Body: manifestBytes, }, - Response: test.Response{ + Response: testutil.Response{ StatusCode: http.StatusOK, }, }, @@ -102,7 +103,7 @@ func TestPush(t *testing.T) { objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), manifestStorage: make(map[string]*registry.ImageManifest), - layerStorage: make(map[string]Layer), + layerStorage: make(map[digest.Digest]Layer), } for _, blob := range testBlobs { @@ -146,7 +147,7 @@ func TestPull(t *testing.T) { for i, blob := range testBlobs { blobs[i] = registry.FSLayer{BlobSum: blob.digest} - history[i] = registry.ManifestHistory{V1Compatibility: blob.digest} + history[i] = registry.ManifestHistory{V1Compatibility: blob.digest.String()} } manifest := ®istry.ImageManifest{ @@ -159,27 +160,27 @@ func TestPull(t *testing.T) { } manifestBytes, err := json.Marshal(manifest) - blobRequestResponseMappings := make([]test.RequestResponseMapping, len(testBlobs)) + blobRequestResponseMappings := make([]testutil.RequestResponseMapping, len(testBlobs)) for i, blob := range testBlobs { - blobRequestResponseMappings[i] = test.RequestResponseMapping{ - Request: test.Request{ + blobRequestResponseMappings[i] = testutil.RequestResponseMapping{ + Request: testutil.Request{ Method: "GET", - Route: "/v2/" + name + "/blob/" + blob.digest, + 
Route: "/v2/" + name + "/blob/" + blob.digest.String(), }, - Response: test.Response{ + Response: testutil.Response{ StatusCode: http.StatusOK, Body: blob.contents, }, } } - handler := test.NewHandler(append(blobRequestResponseMappings, test.RequestResponseMap{ - test.RequestResponseMapping{ - Request: test.Request{ + handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{ + testutil.RequestResponseMapping{ + Request: testutil.Request{ Method: "GET", Route: "/v2/" + name + "/manifest/" + tag, }, - Response: test.Response{ + Response: testutil.Response{ StatusCode: http.StatusOK, Body: manifestBytes, }, @@ -190,7 +191,7 @@ func TestPull(t *testing.T) { objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), manifestStorage: make(map[string]*registry.ImageManifest), - layerStorage: make(map[string]Layer), + layerStorage: make(map[digest.Digest]Layer), } err = Pull(client, objectStore, name, tag) diff --git a/client/objectstore.go b/client/objectstore.go index d8e2ac76..bee73ff0 100644 --- a/client/objectstore.go +++ b/client/objectstore.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/docker/docker-registry" + "github.com/docker/docker-registry/digest" ) var ( @@ -34,7 +35,7 @@ type ObjectStore interface { WriteManifest(name, tag string, manifest *registry.ImageManifest) error // Layer returns a handle to a layer for reading and writing - Layer(blobSum string) (Layer, error) + Layer(dgst digest.Digest) (Layer, error) } // Layer is a generic image layer interface. @@ -56,7 +57,7 @@ type Layer interface { type memoryObjectStore struct { mutex *sync.Mutex manifestStorage map[string]*registry.ImageManifest - layerStorage map[string]Layer + layerStorage map[digest.Digest]Layer } func (objStore *memoryObjectStore) Manifest(name, tag string) (*registry.ImageManifest, error) { @@ -78,14 +79,14 @@ func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *reg return nil } -func (objStore *memoryObjectStore) Layer(blobSum string) (Layer, error) { +func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) { objStore.mutex.Lock() defer objStore.mutex.Unlock() - layer, ok := objStore.layerStorage[blobSum] + layer, ok := objStore.layerStorage[dgst] if !ok { layer = &memoryLayer{cond: sync.NewCond(new(sync.Mutex))} - objStore.layerStorage[blobSum] = layer + objStore.layerStorage[dgst] = layer } return layer, nil diff --git a/test/test.go b/common/testutil/handler.go similarity index 99% rename from test/test.go rename to common/testutil/handler.go index 24a08f75..fa118cd1 100644 --- a/test/test.go +++ b/common/testutil/handler.go @@ -1,4 +1,4 @@ -package test +package testutil import ( "bytes" diff --git a/errors.go b/errors.go index bb59a5af..9a28e5b6 100644 --- a/errors.go +++ b/errors.go @@ -3,6 +3,8 @@ package registry import ( "fmt" "strings" + + "github.com/docker/docker-registry/digest" ) // ErrorCode represents the error type. The errors are serialized via strings @@ -228,7 +230,7 @@ func (e *ImageManifestNotFoundError) Error() string { // layer that does not exist in the registry. 
type BlobNotFoundError struct { Name string - Digest string + Digest digest.Digest } func (e *BlobNotFoundError) Error() string { diff --git a/images.go b/images.go index e30c6a5f..534069b2 100644 --- a/images.go +++ b/images.go @@ -4,6 +4,7 @@ import ( "encoding/json" "net/http" + "github.com/docker/docker-registry/digest" "github.com/gorilla/handlers" ) @@ -52,7 +53,7 @@ func (m *ImageManifest) UnmarshalJSON(b []byte) error { // FSLayer is a container struct for BlobSums defined in an image manifest type FSLayer struct { // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum string `json:"blobSum"` + BlobSum digest.Digest `json:"blobSum"` } // ManifestHistory stores unstructured v1 compatibility information From 68fd15b6885c09210558433017007054c5894c58 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 20 Nov 2014 14:11:49 -0800 Subject: [PATCH 055/165] Miscellaneous storagedriver+ipc fixes Fixes/tests listing for keys beginning with "/" No longer extraneously wraps Closers in ioutil.NopClosers Uses omitempty for all ipc struct type fields --- storagedriver/filesystem/driver.go | 5 ++-- storagedriver/inmemory/driver.go | 7 ++++-- storagedriver/ipc/client.go | 2 +- storagedriver/ipc/ipc.go | 34 +++++++++++++------------- storagedriver/ipc/server.go | 2 +- storagedriver/testsuites/testsuites.go | 8 ++++-- 6 files changed, 33 insertions(+), 25 deletions(-) diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go index eabb493d..a4b2e688 100644 --- a/storagedriver/filesystem/driver.go +++ b/storagedriver/filesystem/driver.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "os" "path" - "strings" "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/factory" @@ -177,7 +176,9 @@ func (d *Driver) CurrentSize(subPath string) (uint64, error) { // List returns a list of the objects that are direct descendants of the given // path. func (d *Driver) List(subPath string) ([]string, error) { - subPath = strings.TrimRight(subPath, "/") + if subPath[len(subPath)-1] != '/' { + subPath += "/" + } fullPath := d.subPath(subPath) dir, err := os.Open(fullPath) diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go index 8685eb25..0d28b2da 100644 --- a/storagedriver/inmemory/driver.go +++ b/storagedriver/inmemory/driver.go @@ -121,14 +121,17 @@ func (d *Driver) CurrentSize(path string) (uint64, error) { // List returns a list of the objects that are direct descendants of the given // path. 
func (d *Driver) List(path string) ([]string, error) { - subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s/[^/]+", path)) + if path[len(path)-1] != '/' { + path += "/" + } + subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s[^/]+", path)) if err != nil { return nil, err } d.mutex.RLock() defer d.mutex.RUnlock() - // we use map to collect uniq keys + // we use map to collect unique keys keySet := make(map[string]struct{}) for k := range d.storage { if key := subPathMatcher.FindString(k); key != "" { diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index 51b02b46..a0ff3788 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -267,7 +267,7 @@ func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, } receiver, remoteSender := libchan.Pipe() - params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": ioutil.NopCloser(reader)} + params := map[string]interface{}{"Path": path, "Offset": offset, "Size": size, "Reader": reader} err := driver.sender.Send(&Request{Type: "WriteStream", Parameters: params, ResponseChannel: remoteSender}) if err != nil { return err diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go index 182a1af6..82bdcbd7 100644 --- a/storagedriver/ipc/ipc.go +++ b/storagedriver/ipc/ipc.go @@ -31,9 +31,9 @@ func (e IncompatibleVersionError) Error() string { // Request defines a remote method call request // A return value struct is to be sent over the ResponseChannel type Request struct { - Type string - Parameters map[string]interface{} - ResponseChannel libchan.Sender + Type string `codec:",omitempty"` + Parameters map[string]interface{} `codec:",omitempty"` + ResponseChannel libchan.Sender `codec:",omitempty"` } // ResponseError is a serializable error type. @@ -41,9 +41,9 @@ type Request struct { // client side, falling back to using the Type and Message if this cannot be // done. 
type ResponseError struct { - Type string - Message string - Parameters map[string]interface{} + Type string `codec:",omitempty"` + Message string `codec:",omitempty"` + Parameters map[string]interface{} `codec:",omitempty"` } // WrapError wraps an error in a serializable struct containing the error's type @@ -108,39 +108,39 @@ func (err *ResponseError) Error() string { // VersionResponse is a response for a Version request type VersionResponse struct { - Version storagedriver.Version - Error *ResponseError + Version storagedriver.Version `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` } // ReadStreamResponse is a response for a ReadStream request type ReadStreamResponse struct { - Reader io.ReadCloser - Error *ResponseError + Reader io.ReadCloser `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` } // WriteStreamResponse is a response for a WriteStream request type WriteStreamResponse struct { - Error *ResponseError + Error *ResponseError `codec:",omitempty"` } // CurrentSizeResponse is a response for a CurrentSize request type CurrentSizeResponse struct { - Position uint64 - Error *ResponseError + Position uint64 `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` } // ListResponse is a response for a List request type ListResponse struct { - Keys []string - Error *ResponseError + Keys []string `codec:",omitempty"` + Error *ResponseError `codec:",omitempty"` } // MoveResponse is a response for a Move request type MoveResponse struct { - Error *ResponseError + Error *ResponseError `codec:",omitempty"` } // DeleteResponse is a response for a Delete request type DeleteResponse struct { - Error *ResponseError + Error *ResponseError `codec:",omitempty"` } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 71422f93..7d1876ca 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -106,7 +106,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { if err != nil { response = ReadStreamResponse{Error: WrapError(err)} } else { - response = ReadStreamResponse{Reader: ioutil.NopCloser(reader)} + response = ReadStreamResponse{Reader: reader} } err = request.ResponseChannel.Send(&response) if err != nil { diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 45633d10..4c86b05a 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -253,7 +253,7 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { // TestList checks the returned list of keys after populating a directory tree func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := randomString(uint64(8 + rand.Intn(8))) + rootDirectory := "/" + randomString(uint64(8+rand.Intn(8))) defer suite.StorageDriver.Delete(rootDirectory) parentDirectory := rootDirectory + "/" + randomString(uint64(8+rand.Intn(8))) @@ -266,7 +266,11 @@ func (suite *DriverSuite) TestList(c *check.C) { } sort.Strings(childFiles) - keys, err := suite.StorageDriver.List(rootDirectory) + keys, err := suite.StorageDriver.List("/") + c.Assert(err, check.IsNil) + c.Assert(keys, check.DeepEquals, []string{rootDirectory}) + + keys, err = suite.StorageDriver.List(rootDirectory) c.Assert(err, check.IsNil) c.Assert(keys, check.DeepEquals, []string{parentDirectory}) From 3f6cb5235714f1cbf0fb1fe9b7dd68456cd7f0f1 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Wed, 19 Nov 2014 14:53:32 -0800 Subject: [PATCH 056/165] Move to own dev image --- .drone.yml | 10 
+---------
 project/dev-image/Dockerfile | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+), 9 deletions(-)
 create mode 100644 project/dev-image/Dockerfile

diff --git a/.drone.yml b/.drone.yml
index f98fe203..2d42f6a5 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -1,14 +1,6 @@
-image: bradrydzewski/go:1.3
-
-env:
-  - GOROOT=/usr/local/go
-  - PATH=$PATH:$GOROOT/bin:$GOPATH/bin
+image: dmp42/go:stable
 
 script:
-  - go get github.com/axw/gocov/gocov
-  - go get github.com/mattn/goveralls
-  - go get github.com/golang/lint/golint
-  - go get -t ./...
   - FAIL=$(find ./ -iname "*.go" -exec gofmt -s -l {} \;) && echo "$FAIL" && test -z "$FAIL"

diff --git a/project/dev-image/Dockerfile b/project/dev-image/Dockerfile
new file mode 100644
index 00000000..f77af95c
--- /dev/null
+++ b/project/dev-image/Dockerfile
@@ -0,0 +1,19 @@
+FROM ubuntu:14.04
+
+ENV GOLANG_VERSION 1.4rc1
+ENV GOPATH /var/cache/drone
+ENV GOROOT /usr/local/go
+ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin
+
+ENV LANG C
+
+RUN apt-get update && apt-get install -y \
+	wget ca-certificates git mercurial bzr \
+	--no-install-recommends \
+	&& rm -rf /var/lib/apt/lists/*
+
+RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \
+	tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \
+	rm go${GOLANG_VERSION}.linux-amd64.tar.gz
+
+RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
\ No newline at end of file

From 5a804ac05bc3facd61953908da99310a257727a1 Mon Sep 17 00:00:00 2001
From: Anton Tiurin
Date: Fri, 21 Nov 2014 03:15:34 +0300
Subject: [PATCH 057/165] [Client] Fix possible goroutine leak.

Goroutines running pullLayer block while sending the error result of a pull
operation. If we abort pulling without notifying them about the cancelation,
they will get stuck forever. To avoid this possible leak, cancelCh was
introduced: in case of an abort, we close that channel to notify the other
goroutines about the cancelation.
---
 client/pull.go | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/client/pull.go b/client/pull.go
index 825b0c06..dffa392e 100644
--- a/client/pull.go
+++ b/client/pull.go
@@ -36,6 +36,11 @@ func Pull(c Client, objectStore ObjectStore, name, tag string) error {
 		errChans[i] = make(chan error)
 	}
 
+	// To avoid leak goroutine we must notify
+	// pullLayer goroutines about a cancelation,
+	// otherwise they will lock forever.
+	cancelCh := make(chan struct{})
+
 	// Iterate over each layer in the manifest, simultaneously pulling no more
 	// than simultaneousLayerPullWindow layers at a time. If an error is
 	// received from a layer pull, we abort the push.
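// Editor's aside (not part of the patch): the cancelation pattern above, in
// miniature. A send on an unbuffered channel blocks until a receiver arrives,
// so each worker also selects on a close-only cancel channel; closing that
// channel unblocks every worker at once. doWork is a hypothetical stand-in
// for pullLayer:
//
//	cancelCh := make(chan struct{})
//	errCh := make(chan error)
//
//	go func() {
//		select {
//		case errCh <- doWork(): // delivered only while the coordinator listens
//		case <-cancelCh: // receive succeeds once cancelCh is closed
//		}
//	}()
//
//	if err := <-errCh; err != nil {
//		close(cancelCh) // broadcast cancelation; close exactly once
//	}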
@@ -45,13 +50,17 @@ func Pull(c Client, objectStore ObjectStore, name, tag string) error { err := <-errChans[dependentLayer] if err != nil { log.WithField("error", err).Warn("Pull aborted") + close(cancelCh) return err } } if i < len(manifest.FSLayers) { go func(i int) { - errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]) + select { + case errChans[i] <- pullLayer(c, objectStore, name, manifest.FSLayers[i]): + case <-cancelCh: // no chance to recv until cancelCh's closed + } }(i) } } From 18eac89506705b84b3d4871daa36bf02664b5e6d Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 20 Nov 2014 14:50:51 -0800 Subject: [PATCH 058/165] Adds a test for concurrent storagedriver Write/Read Stream operations This test is currently failing and Skipped for IPC drivers --- storagedriver/ipc/client.go | 4 +- storagedriver/testsuites/testsuites.go | 140 ++++++++++++++++++------- 2 files changed, 104 insertions(+), 40 deletions(-) diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index a0ff3788..08f7b800 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -422,10 +422,10 @@ func (driver *StorageDriverClient) handleSubprocessExit() { // stopped func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error { receiveChan := make(chan error, 1) - go func(receiveChan chan<- error) { + go func(receiver libchan.Receiver, receiveChan chan<- error) { defer close(receiveChan) receiveChan <- receiver.Receive(response) - }(receiveChan) + }(receiver, receiveChan) var err error var ok bool diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 4c86b05a..61756667 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -4,6 +4,7 @@ import ( "bytes" "io/ioutil" "math/rand" + "os" "path" "sort" "testing" @@ -17,7 +18,8 @@ import ( // Test hooks up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } -// RegisterInProcessSuite registers an in-process storage driver test suite with the go test runner +// RegisterInProcessSuite registers an in-process storage driver test suite with +// the go test runner. func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { check.Suite(&DriverSuite{ Constructor: driverConstructor, @@ -25,8 +27,8 @@ func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipC }) } -// RegisterIPCSuite registers a storage driver test suite which runs the named driver as a child -// process with the given parameters +// RegisterIPCSuite registers a storage driver test suite which runs the named +// driver as a child process with the given parameters. func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) { suite := &DriverSuite{ Constructor: func() (storagedriver.StorageDriver, error) { @@ -53,21 +55,26 @@ func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck check.Suite(suite) } -// SkipCheck is a function used to determine if a test suite should be skipped -// If a SkipCheck returns a non-empty skip reason, the suite is skipped with the given reason +// SkipCheck is a function used to determine if a test suite should be skipped. +// If a SkipCheck returns a non-empty skip reason, the suite is skipped with +// the given reason. 
type SkipCheck func() (reason string) -// NeverSkip is a default SkipCheck which never skips the suite +// NeverSkip is a default SkipCheck which never skips the suite. var NeverSkip SkipCheck = func() string { return "" } -// DriverConstructor is a function which returns a new storagedriver.StorageDriver +// DriverConstructor is a function which returns a new +// storagedriver.StorageDriver. type DriverConstructor func() (storagedriver.StorageDriver, error) -// DriverTeardown is a function which cleans up a suite's storagedriver.StorageDriver +// DriverTeardown is a function which cleans up a suite's +// storagedriver.StorageDriver. type DriverTeardown func() error -// DriverSuite is a gocheck test suite designed to test a storagedriver.StorageDriver -// The intended way to create a DriverSuite is with RegisterInProcessSuite or RegisterIPCSuite +// DriverSuite is a gocheck test suite designed to test a +// storagedriver.StorageDriver. +// The intended way to create a DriverSuite is with RegisterInProcessSuite or +// RegisterIPCSuite. type DriverSuite struct { Constructor DriverConstructor Teardown DriverTeardown @@ -75,7 +82,7 @@ type DriverSuite struct { storagedriver.StorageDriver } -// SetUpSuite sets up the gocheck test suite +// SetUpSuite sets up the gocheck test suite. func (suite *DriverSuite) SetUpSuite(c *check.C) { if reason := suite.SkipCheck(); reason != "" { c.Skip(reason) @@ -85,7 +92,7 @@ func (suite *DriverSuite) SetUpSuite(c *check.C) { suite.StorageDriver = d } -// TearDownSuite tears down the gocheck test suite +// TearDownSuite tears down the gocheck test suite. func (suite *DriverSuite) TearDownSuite(c *check.C) { if suite.Teardown != nil { err := suite.Teardown() @@ -93,35 +100,35 @@ func (suite *DriverSuite) TearDownSuite(c *check.C) { } } -// TestWriteRead1 tests a simple write-read workflow +// TestWriteRead1 tests a simple write-read workflow. func (suite *DriverSuite) TestWriteRead1(c *check.C) { filename := randomString(32) contents := []byte("a") - suite.writeReadCompare(c, filename, contents, contents) + suite.writeReadCompare(c, filename, contents) } -// TestWriteRead2 tests a simple write-read workflow with unicode data +// TestWriteRead2 tests a simple write-read workflow with unicode data. func (suite *DriverSuite) TestWriteRead2(c *check.C) { filename := randomString(32) contents := []byte("\xc3\x9f") - suite.writeReadCompare(c, filename, contents, contents) + suite.writeReadCompare(c, filename, contents) } -// TestWriteRead3 tests a simple write-read workflow with a small string +// TestWriteRead3 tests a simple write-read workflow with a small string. func (suite *DriverSuite) TestWriteRead3(c *check.C) { filename := randomString(32) contents := []byte(randomString(32)) - suite.writeReadCompare(c, filename, contents, contents) + suite.writeReadCompare(c, filename, contents) } -// TestWriteRead4 tests a simple write-read workflow with 1MB of data +// TestWriteRead4 tests a simple write-read workflow with 1MB of data. func (suite *DriverSuite) TestWriteRead4(c *check.C) { filename := randomString(32) contents := []byte(randomString(1024 * 1024)) - suite.writeReadCompare(c, filename, contents, contents) + suite.writeReadCompare(c, filename, contents) } -// TestReadNonexistent tests reading content from an empty path +// TestReadNonexistent tests reading content from an empty path. 
func (suite *DriverSuite) TestReadNonexistent(c *check.C) { filename := randomString(32) _, err := suite.StorageDriver.GetContent(filename) @@ -129,39 +136,39 @@ func (suite *DriverSuite) TestReadNonexistent(c *check.C) { c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } -// TestWriteReadStreams1 tests a simple write-read streaming workflow +// TestWriteReadStreams1 tests a simple write-read streaming workflow. func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { filename := randomString(32) contents := []byte("a") - suite.writeReadCompareStreams(c, filename, contents, contents) + suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams2 tests a simple write-read streaming workflow with -// unicode data +// unicode data. func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { filename := randomString(32) contents := []byte("\xc3\x9f") - suite.writeReadCompareStreams(c, filename, contents, contents) + suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams3 tests a simple write-read streaming workflow with a -// small amount of data +// small amount of data. func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { filename := randomString(32) contents := []byte(randomString(32)) - suite.writeReadCompareStreams(c, filename, contents, contents) + suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB -// of data +// of data. func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { filename := randomString(32) contents := []byte(randomString(1024 * 1024)) - suite.writeReadCompareStreams(c, filename, contents, contents) + suite.writeReadCompareStreams(c, filename, contents) } // TestContinueStreamAppend tests that a stream write can be appended to without -// corrupting the data +// corrupting the data. func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) @@ -200,7 +207,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { } // TestReadStreamWithOffset tests that the appropriate data is streamed when -// reading with a given offset +// reading with a given offset. func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) @@ -243,7 +250,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { } // TestReadNonexistentStream tests that reading a stream for a nonexistent path -// fails +// fails. func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomString(32) _, err := suite.StorageDriver.ReadStream(filename, 0) @@ -251,7 +258,7 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } -// TestList checks the returned list of keys after populating a directory tree +// TestList checks the returned list of keys after populating a directory tree. func (suite *DriverSuite) TestList(c *check.C) { rootDirectory := "/" + randomString(uint64(8+rand.Intn(8))) defer suite.StorageDriver.Delete(rootDirectory) @@ -282,7 +289,7 @@ func (suite *DriverSuite) TestList(c *check.C) { } // TestMove checks that a moved object no longer exists at the source path and -// does exist at the destination +// does exist at the destination. 
func (suite *DriverSuite) TestMove(c *check.C) { contents := []byte(randomString(32)) sourcePath := randomString(32) @@ -335,7 +342,7 @@ func (suite *DriverSuite) TestDelete(c *check.C) { c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } -// TestDeleteNonexistent checks that removing a nonexistent key fails +// TestDeleteNonexistent checks that removing a nonexistent key fails. func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { filename := randomString(32) err := suite.StorageDriver.Delete(filename) @@ -343,7 +350,7 @@ func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } -// TestDeleteFolder checks that deleting a folder removes all child elements +// TestDeleteFolder checks that deleting a folder removes all child elements. func (suite *DriverSuite) TestDeleteFolder(c *check.C) { dirname := randomString(32) filename1 := randomString(32) @@ -371,7 +378,64 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) { c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } -func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents, expected []byte) { +// TestConcurrentFileStreams checks that multiple *os.File objects can be passed +// in to WriteStream concurrently without hanging. +// TODO(bbland): fix this test... +func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { + if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC { + c.Skip("Need to fix out-of-process concurrency") + } + + doneChan := make(chan struct{}) + + testStream := func(size int) { + suite.testFileStreams(c, size) + doneChan <- struct{}{} + } + + go testStream(8 * 1024 * 1024) + go testStream(4 * 1024 * 1024) + go testStream(2 * 1024 * 1024) + go testStream(1024 * 1024) + go testStream(1024) + go testStream(64) + + for i := 0; i < 6; i++ { + <-doneChan + } + +} + +func (suite *DriverSuite) testFileStreams(c *check.C, size int) { + tf, err := ioutil.TempFile("", "tf") + c.Assert(err, check.IsNil) + defer os.Remove(tf.Name()) + + tfName := path.Base(tf.Name()) + defer suite.StorageDriver.Delete(tfName) + + contents := []byte(randomString(uint64(size))) + + _, err = tf.Write(contents) + c.Assert(err, check.IsNil) + + tf.Sync() + tf.Seek(0, os.SEEK_SET) + + err = suite.StorageDriver.WriteStream(tfName, 0, uint64(size), tf) + c.Assert(err, check.IsNil) + + reader, err := suite.StorageDriver.ReadStream(tfName, 0) + c.Assert(err, check.IsNil) + defer reader.Close() + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + + c.Assert(readContents, check.DeepEquals, contents) +} + +func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { defer suite.StorageDriver.Delete(filename) err := suite.StorageDriver.PutContent(filename, contents) @@ -383,7 +447,7 @@ func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents c.Assert(readContents, check.DeepEquals, contents) } -func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents, expected []byte) { +func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { defer suite.StorageDriver.Delete(filename) err := suite.StorageDriver.WriteStream(filename, 0, uint64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents))) From fdd053a618b1dda08d4ed72625318b8db041dca4 Mon Sep 17 00:00:00 2001 From: Anton Tiurin Date: Fri, 21 Nov 2014 03:24:25 +0300 Subject: [PATCH 059/165] Typo fix 
---
 client/pull.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/client/pull.go b/client/pull.go
index dffa392e..bce06756 100644
--- a/client/pull.go
+++ b/client/pull.go
@@ -36,7 +36,7 @@ func Pull(c Client, objectStore ObjectStore, name, tag string) error {
 		errChans[i] = make(chan error)
 	}
 
-	// To avoid leak goroutine we must notify
+	// To avoid leak of goroutines we must notify
 	// pullLayer goroutines about a cancelation,
 	// otherwise they will lock forever.
 	cancelCh := make(chan struct{})

From 2071422bea9f98a0e5258786b89caa1b0c145509 Mon Sep 17 00:00:00 2001
From: Anton Tiurin
Date: Fri, 21 Nov 2014 11:20:16 +0300
Subject: [PATCH 060/165] [Client] Fix possible goroutine leak in push.

The same as 5a804ac05bc3facd61953908da99310a257727a1
---
 client/push.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/client/push.go b/client/push.go
index 91bd9af6..08726058 100644
--- a/client/push.go
+++ b/client/push.go
@@ -36,6 +36,8 @@ func Push(c Client, objectStore ObjectStore, name, tag string) error {
 		errChans[i] = make(chan error)
 	}
 
+	cancelCh := make(chan struct{})
+
 	// Iterate over each layer in the manifest, simultaneously pushing no more
 	// than simultaneousLayerPushWindow layers at a time. If an error is
 	// received from a layer push, we abort the push.
@@ -45,13 +47,17 @@ func Push(c Client, objectStore ObjectStore, name, tag string) error {
 			err := <-errChans[dependentLayer]
 			if err != nil {
 				log.WithField("error", err).Warn("Push aborted")
+				close(cancelCh)
 				return err
 			}
 		}
 
 		if i < len(manifest.FSLayers) {
 			go func(i int) {
-				errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i])
+				select {
+				case errChans[i] <- pushLayer(c, objectStore, name, manifest.FSLayers[i]):
+				case <-cancelCh: // recv broadcast notification about cancelation
+				}
 			}(i)
 		}
 	}

From 96a557c8e74ed964b5cbb434bfc5c74853686ba9 Mon Sep 17 00:00:00 2001
From: Olivier Gambier
Date: Fri, 21 Nov 2014 12:02:34 -0800
Subject: [PATCH 061/165] Move notifications to dt

---
 .drone.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.drone.yml b/.drone.yml
index 2d42f6a5..60ea41da 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -17,7 +17,7 @@ notify:
   slack:
     team: docker
-    channel: "#distribution"
+    channel: "#dt"
     username: mom
     token: {{SLACK_TOKEN}}
     on_success: true

From 3f479b62b4b3f1dede3badc24e9ee1d57431b07c Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 20 Nov 2014 17:49:35 -0800
Subject: [PATCH 062/165] Refactor layerReader into fileReader

This change separates out the remote file reader functionality from layer
representation data. More importantly, issues with seeking have been fixed
and thoroughly tested.
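For orientation, a small usage sketch (an editor's illustration, not part of
the patch, using a hypothetical path and content): the new fileReader behaves
like a standard io.ReadSeeker over driver-backed content, transparently
reopening the remote stream after a seek.

	driver := inmemory.New()
	_ = driver.PutContent("/blob", []byte("hello, registry"))

	fr, err := newFileReader(driver, "/blob")
	if err != nil {
		log.Fatal(err)
	}
	defer fr.Close()

	// Read five bytes starting at offset 7: "regis".
	if _, err := fr.Seek(7, os.SEEK_SET); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 5)
	if _, err := io.ReadFull(fr, buf); err != nil {
		log.Fatal(err)
	}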
---
 storage/filereader.go | 163 +++++++++++++++++++++++++++++++++++++
 storage/filereader_test.go | 158 +++++++++++++++++++++++++++++++++++
 storage/layer.go | 4 +
 storage/layer_test.go | 25 ------
 storage/layerreader.go | 145 +--------------------------------
 storage/layerstore.go | 33 +++-----
 storage/layerupload.go | 4 +
 storage/services.go | 3 +-
 8 files changed, 344 insertions(+), 191 deletions(-)
 create mode 100644 storage/filereader.go
 create mode 100644 storage/filereader_test.go

diff --git a/storage/filereader.go b/storage/filereader.go
new file mode 100644
index 00000000..8f1f5205
--- /dev/null
+++ b/storage/filereader.go
@@ -0,0 +1,163 @@
+package storage
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/docker/docker-registry/storagedriver"
+)
+
+// remoteFileReader provides a read seeker interface to files stored in
+// storagedriver. Used to implement part of layer interface and will be used
+// to implement read side of LayerUpload.
+type fileReader struct {
+	driver storagedriver.StorageDriver
+
+	// identifying fields
+	path string
+	size int64 // size is the total layer size, must be set.
+
+	// mutable fields
+	rc     io.ReadCloser // remote read closer
+	brd    *bufio.Reader // internal buffered io
+	offset int64         // offset is the current read offset
+	err    error         // terminal error, if set, reader is closed
+}
+
+func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) {
+	// Grab the size of the layer file, ensuring existence.
+	size, err := driver.CurrentSize(path)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &fileReader{
+		driver: driver,
+		path:   path,
+		size:   int64(size),
+	}, nil
+}
+
+func (fr *fileReader) Read(p []byte) (n int, err error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	rd, err := fr.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	fr.offset += int64(n)
+
+	// Simulate io.EOF error if we reach filesize.
+	if err == nil && fr.offset >= fr.size {
+		err = io.EOF
+	}
+
+	return n, err
+}
+
+func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
+	if fr.err != nil {
+		return 0, fr.err
+	}
+
+	var err error
+	newOffset := fr.offset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += int64(offset)
+	case os.SEEK_END:
+		newOffset = fr.size + int64(offset)
+	case os.SEEK_SET:
+		newOffset = int64(offset)
+	}
+
+	if newOffset < 0 {
+		err = fmt.Errorf("cannot seek to negative position")
+	} else if newOffset > fr.size {
+		err = fmt.Errorf("cannot seek past end of file")
+	} else {
+		if fr.offset != newOffset {
+			fr.reset()
+		}
+
+		// No problems, set the offset.
+		fr.offset = newOffset
+	}
+
+	return fr.offset, err
+}
+
+// Close the layer. Should be called when the resource is no longer needed.
+func (fr *fileReader) Close() error {
+	if fr.err != nil {
+		return fr.err
+	}
+
+	fr.err = ErrLayerClosed
+
+	// close and release reader chain
+	if fr.rc != nil {
+		fr.rc.Close()
+	}
+
+	fr.rc = nil
+	fr.brd = nil
+
+	return fr.err
+}
+
+// reader prepares the current reader at the lrs offset, ensuring it's
+// buffered and ready to go.
+func (fr *fileReader) reader() (io.Reader, error) {
+	if fr.err != nil {
+		return nil, fr.err
+	}
+
+	if fr.rc != nil {
+		return fr.brd, nil
+	}
+
+	// If we don't have a reader, open one up.
+	rc, err := fr.driver.ReadStream(fr.path, uint64(fr.offset))
+
+	if err != nil {
+		return nil, err
+	}
+
+	fr.rc = rc
+
+	if fr.brd == nil {
+		// TODO(stevvooe): Set an optimal buffer size here. We'll have to
We'll have to + // understand the latency characteristics of the underlying network to + // set this correctly, so we may want to leave it to the driver. For + // out of process drivers, we'll have to optimize this buffer size for + // local communication. + fr.brd = bufio.NewReader(fr.rc) + } else { + fr.brd.Reset(fr.rc) + } + + return fr.brd, nil +} + +// resetReader resets the reader, forcing the read method to open up a new +// connection and rebuild the buffered reader. This should be called when the +// offset and the reader will become out of sync, such as during a seek +// operation. +func (fr *fileReader) reset() { + if fr.err != nil { + return + } + if fr.rc != nil { + fr.rc.Close() + fr.rc = nil + } +} diff --git a/storage/filereader_test.go b/storage/filereader_test.go new file mode 100644 index 00000000..cfc9d215 --- /dev/null +++ b/storage/filereader_test.go @@ -0,0 +1,158 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "io" + mrand "math/rand" + "os" + "testing" + + "github.com/docker/docker-registry/digest" + + "github.com/docker/docker-registry/storagedriver/inmemory" +) + +func TestSimpleRead(t *testing.T) { + content := make([]byte, 1<<20) + n, err := rand.Read(content) + if err != nil { + t.Fatalf("unexpected error building random data: %v", err) + } + + if n != len(content) { + t.Fatalf("random read did't fill buffer") + } + + dgst, err := digest.FromReader(bytes.NewReader(content)) + if err != nil { + t.Fatalf("unexpected error digesting random content: %v", err) + } + + driver := inmemory.New() + path := "/random" + + if err := driver.PutContent(path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(driver, path) + if err != nil { + t.Fatalf("error allocating file reader: %v", err) + } + + verifier := digest.NewDigestVerifier(dgst) + io.Copy(verifier, fr) + + if !verifier.Verified() { + t.Fatalf("unable to verify read data") + } +} + +func TestFileReaderSeek(t *testing.T) { + driver := inmemory.New() + pattern := "01234567890ab" // prime length block + repititions := 1024 + path := "/patterned" + content := bytes.Repeat([]byte(pattern), repititions) + + if err := driver.PutContent(path, content); err != nil { + t.Fatalf("error putting patterned content: %v", err) + } + + fr, err := newFileReader(driver, path) + + if err != nil { + t.Fatalf("unexpected error creating file reader: %v", err) + } + + // Seek all over the place, in blocks of pattern size and make sure we get + // the right data. 
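+	// For example, with the 13-byte pattern above, seeking to offset 13*k
+	// and reading 13 bytes must always yield the pattern exactly, no
+	// matter the order in which the offsets are visited.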
+ for _, repitition := range mrand.Perm(repititions - 1) { + targetOffset := int64(len(pattern) * repitition) + // Seek to a multiple of pattern size and read pattern size bytes + offset, err := fr.Seek(targetOffset, os.SEEK_SET) + if err != nil { + t.Fatalf("unexpected error seeking: %v", err) + } + + if offset != targetOffset { + t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) + } + + p := make([]byte, len(pattern)) + + n, err := fr.Read(p) + if err != nil { + t.Fatalf("error reading pattern: %v", err) + } + + if n != len(pattern) { + t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) + } + + if string(p) != pattern { + t.Fatalf("incorrect read content: %q != %q", p, pattern) + } + + // Check offset + current, err := fr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if current != targetOffset+int64(len(pattern)) { + t.Fatalf("unexpected offset after read: %v", err) + } + } + + start, err := fr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatalf("error seeking to start: %v", err) + } + + if start != 0 { + t.Fatalf("expected to seek to start: %v != 0", start) + } + + end, err := fr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatalf("error checking current offset: %v", err) + } + + if end != int64(len(content)) { + t.Fatalf("expected to seek to end: %v != %v", end, len(content)) + } + + // 4. Seek past end and before start, ensure error. + + // seek before start + before, err := fr.Seek(-1, os.SEEK_SET) + if err == nil { + t.Fatalf("error expected, returned offset=%v", before) + } + + after, err := fr.Seek(int64(len(content)+1), os.SEEK_END) + if err == nil { + t.Fatalf("error expected, returned offset=%v", after) + } +} + +// TestLayerReadErrors covers the various error return type for different +// conditions that can arise when reading a layer. +func TestFileReaderErrors(t *testing.T) { + // TODO(stevvooe): We need to cover error return types, driven by the + // errors returned via the HTTP API. For now, here is a incomplete list: + // + // 1. Layer Not Found: returned when layer is not found or access is + // denied. + // 2. Layer Unavailable: returned when link references are unresolved, + // but layer is known to the registry. + // 3. Layer Invalid: This may more split into more errors, but should be + // returned when name or tarsum does not reference a valid error. We + // may also need something to communication layer verification errors + // for the inline tarsum check. + // 4. Timeout: timeouts to backend. Need to better understand these + // failure cases and how the storage driver propagates these errors + // up the stack. +} diff --git a/storage/layer.go b/storage/layer.go index 6c45f401..d2ddfb07 100644 --- a/storage/layer.go +++ b/storage/layer.go @@ -87,4 +87,8 @@ var ( // ErrLayerInvalidLength returned when length check fails. ErrLayerInvalidLength = fmt.Errorf("invalid layer length") + + // ErrLayerClosed returned when an operation is attempted on a closed + // Layer or LayerUpload. + ErrLayerClosed = fmt.Errorf("layer closed") ) diff --git a/storage/layer_test.go b/storage/layer_test.go index 335793d2..03cba9b9 100644 --- a/storage/layer_test.go +++ b/storage/layer_test.go @@ -241,31 +241,6 @@ func TestSimpleLayerRead(t *testing.T) { } } -func TestLayerReaderSeek(t *testing.T) { - // TODO(stevvooe): Ensure that all relative seeks work as advertised. - // Readers must close and re-open on command. 
This is important to support - // resumable and concurrent downloads via HTTP range requests. -} - -// TestLayerReadErrors covers the various error return type for different -// conditions that can arise when reading a layer. -func TestLayerReadErrors(t *testing.T) { - // TODO(stevvooe): We need to cover error return types, driven by the - // errors returned via the HTTP API. For now, here is a incomplete list: - // - // 1. Layer Not Found: returned when layer is not found or access is - // denied. - // 2. Layer Unavailable: returned when link references are unresolved, - // but layer is known to the registry. - // 3. Layer Invalid: This may more split into more errors, but should be - // returned when name or tarsum does not reference a valid error. We - // may also need something to communication layer verification errors - // for the inline tarsum check. - // 4. Timeout: timeouts to backend. Need to better understand these - // failure cases and how the storage driver propagates these errors - // up the stack. -} - // writeRandomLayer creates a random layer under name and tarSum using driver // and pathMapper. An io.ReadSeeker with the data is returned, along with the // sha256 hex digest. diff --git a/storage/layerreader.go b/storage/layerreader.go index 396940d0..2cc184fd 100644 --- a/storage/layerreader.go +++ b/storage/layerreader.go @@ -1,10 +1,6 @@ package storage import ( - "bufio" - "fmt" - "io" - "os" "time" "github.com/docker/docker-registry/digest" @@ -13,22 +9,11 @@ import ( // layerReadSeeker implements Layer and provides facilities for reading and // seeking. type layerReader struct { - layerStore *layerStore - rc io.ReadCloser - brd *bufio.Reader + fileReader name string // repo name of this layer digest digest.Digest - path string createdAt time.Time - - // offset is the current read offset - offset int64 - - // size is the total layer size, if available. - size int64 - - closedErr error // terminal error, if set, reader is closed } var _ Layer = &layerReader{} @@ -44,131 +29,3 @@ func (lrs *layerReader) Digest() digest.Digest { func (lrs *layerReader) CreatedAt() time.Time { return lrs.createdAt } - -func (lrs *layerReader) Read(p []byte) (n int, err error) { - if err := lrs.closed(); err != nil { - return 0, err - } - - rd, err := lrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - lrs.offset += int64(n) - - // Simulate io.EOR error if we reach filesize. - if err == nil && lrs.offset >= lrs.size { - err = io.EOF - } - - // TODO(stevvooe): More error checking is required here. If the reader - // times out for some reason, we should reset the reader so we re-open the - // connection. - - return n, err -} - -func (lrs *layerReader) Seek(offset int64, whence int) (int64, error) { - if err := lrs.closed(); err != nil { - return 0, err - } - - var err error - newOffset := lrs.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(whence) - case os.SEEK_END: - newOffset = lrs.size + int64(whence) - case os.SEEK_SET: - newOffset = int64(whence) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else if newOffset >= lrs.size { - err = fmt.Errorf("cannot seek passed end of layer") - } else { - if lrs.offset != newOffset { - lrs.resetReader() - } - - // No problems, set the offset. - lrs.offset = newOffset - } - - return lrs.offset, err -} - -// Close the layer. Should be called when the resource is no longer needed. 
-func (lrs *layerReader) Close() error { - if lrs.closedErr != nil { - return lrs.closedErr - } - // TODO(sday): Must export this error. - lrs.closedErr = fmt.Errorf("layer closed") - - // close and release reader chain - if lrs.rc != nil { - lrs.rc.Close() - lrs.rc = nil - } - lrs.brd = nil - - return lrs.closedErr -} - -// reader prepares the current reader at the lrs offset, ensuring its buffered -// and ready to go. -func (lrs *layerReader) reader() (io.Reader, error) { - if err := lrs.closed(); err != nil { - return nil, err - } - - if lrs.rc != nil { - return lrs.brd, nil - } - - // If we don't have a reader, open one up. - rc, err := lrs.layerStore.driver.ReadStream(lrs.path, uint64(lrs.offset)) - - if err != nil { - return nil, err - } - - lrs.rc = rc - - if lrs.brd == nil { - // TODO(stevvooe): Set an optimal buffer size here. We'll have to - // understand the latency characteristics of the underlying network to - // set this correctly, so we may want to leave it to the driver. For - // out of process drivers, we'll have to optimize this buffer size for - // local communication. - lrs.brd = bufio.NewReader(lrs.rc) - } else { - lrs.brd.Reset(lrs.rc) - } - - return lrs.brd, nil -} - -// resetReader resets the reader, forcing the read method to open up a new -// connection and rebuild the buffered reader. This should be called when the -// offset and the reader will become out of sync, such as during a seek -// operation. -func (lrs *layerReader) resetReader() { - if err := lrs.closed(); err != nil { - return - } - if lrs.rc != nil { - lrs.rc.Close() - lrs.rc = nil - } -} - -func (lrs *layerReader) closed() error { - return lrs.closedErr -} diff --git a/storage/layerstore.go b/storage/layerstore.go index c9662ffd..6abd50e3 100644 --- a/storage/layerstore.go +++ b/storage/layerstore.go @@ -57,33 +57,26 @@ func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) { return nil, err } - // Grab the size of the layer file, ensuring that it exists, among other - // things. - size, err := ls.driver.CurrentSize(p) - + fr, err := newFileReader(ls.driver, p) if err != nil { - // TODO(stevvooe): Handle blob/path does not exist here. - // TODO(stevvooe): Get a better understanding of the error cases here - // that don't stem from unknown path. - return nil, err + switch err := err.(type) { + case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError: + return nil, ErrLayerUnknown + default: + return nil, err + } } - // Build the layer reader and return to the client. - layer := &layerReader{ - layerStore: ls, - path: p, + return &layerReader{ + fileReader: *fr, name: name, digest: digest, // TODO(stevvooe): Storage backend does not support modification time - // queries yet. Layers "never" change, so just return the zero value. - createdAt: time.Time{}, - - offset: 0, - size: int64(size), - } - - return layer, nil + // queries yet. Layers "never" change, so just return the zero value + // plus a nano-second. + createdAt: (time.Time{}).Add(time.Nanosecond), + }, nil } // Upload begins a layer upload, returning a handle. 
If the layer upload diff --git a/storage/layerupload.go b/storage/layerupload.go index c07927f1..f134aa19 100644 --- a/storage/layerupload.go +++ b/storage/layerupload.go @@ -429,6 +429,10 @@ func (llufs *localFSLayerUploadStore) New(name string) (LayerUploadState, error) return lus, err } + if err := llufs.SaveState(lus); err != nil { + return lus, err + } + return lus, nil } diff --git a/storage/services.go b/storage/services.go index dbe5dc75..afb26d94 100644 --- a/storage/services.go +++ b/storage/services.go @@ -15,7 +15,6 @@ type Services struct { // NewServices creates a new Services object to access docker objects stored // in the underlying driver. func NewServices(driver storagedriver.StorageDriver) *Services { - layerUploadStore, err := newTemporaryLocalFSLayerUploadStore() if err != nil { @@ -40,5 +39,5 @@ func NewServices(driver storagedriver.StorageDriver) *Services { // may be context sensitive in the future. The instance should be used similar // to a request local. func (ss *Services) Layers() LayerService { - return &layerStore{driver: ss.driver, pathMapper: ss.pathMapper} + return &layerStore{driver: ss.driver, pathMapper: ss.pathMapper, uploadStore: ss.layerUploadStore} } From 195568017ad6d722a226d0dfcf0183a481cddce3 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 20 Nov 2014 19:15:09 -0800 Subject: [PATCH 063/165] Update error declarations and add missing test This updates API error codes to coincide with changes to the proposal. Mostly, redundant error codes were merged and missing ones were added. The set in the main errors.go file will flow back into the specification. A test case has been added to ensure ErrorCodeUnknown is included in marshaled json. --- errors.go | 43 +++++++++++++++++++++++++++---------------- errors_test.go | 17 +++++++++++++++-- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/errors.go b/errors.go index 9a28e5b6..e2f16ba0 100644 --- a/errors.go +++ b/errors.go @@ -17,20 +17,14 @@ const ( // The following errors can happen during a layer upload. - // ErrorCodeInvalidChecksum is returned when uploading a layer if the - // provided checksum does not match the layer contents. - ErrorCodeInvalidChecksum + // ErrorCodeInvalidDigest is returned when uploading a layer if the + // provided digest does not match the layer contents. + ErrorCodeInvalidDigest // ErrorCodeInvalidLength is returned when uploading a layer if the provided // length does not match the content length. ErrorCodeInvalidLength - // ErrorCodeInvalidTarsum is returned when the provided tarsum does not - // match the computed tarsum of the contents. - ErrorCodeInvalidTarsum - - // The following errors can happen during manifest upload. - // ErrorCodeInvalidName is returned when the name in the manifest does not // match the provided name. ErrorCodeInvalidName @@ -47,6 +41,9 @@ const ( // nonexistent layer. ErrorCodeUnknownLayer + // ErrorCodeUnknownLayerUpload is returned when an upload is accessed. + ErrorCodeUnknownLayerUpload + // ErrorCodeUntrustedSignature is returned when the manifest is signed by an // untrusted source. 
ErrorCodeUntrustedSignature @@ -54,25 +51,25 @@ const ( var errorCodeStrings = map[ErrorCode]string{ ErrorCodeUnknown: "UNKNOWN", - ErrorCodeInvalidChecksum: "INVALID_CHECKSUM", + ErrorCodeInvalidDigest: "INVALID_DIGEST", ErrorCodeInvalidLength: "INVALID_LENGTH", - ErrorCodeInvalidTarsum: "INVALID_TARSUM", ErrorCodeInvalidName: "INVALID_NAME", ErrorCodeInvalidTag: "INVALID_TAG", ErrorCodeUnverifiedManifest: "UNVERIFIED_MANIFEST", ErrorCodeUnknownLayer: "UNKNOWN_LAYER", + ErrorCodeUnknownLayerUpload: "UNKNOWN_LAYER_UPLOAD", ErrorCodeUntrustedSignature: "UNTRUSTED_SIGNATURE", } var errorCodesMessages = map[ErrorCode]string{ ErrorCodeUnknown: "unknown error", - ErrorCodeInvalidChecksum: "provided checksum did not match uploaded content", + ErrorCodeInvalidDigest: "provided digest did not match uploaded content", ErrorCodeInvalidLength: "provided length did not match content length", - ErrorCodeInvalidTarsum: "provided tarsum did not match binary content", ErrorCodeInvalidName: "Manifest name did not match URI", ErrorCodeInvalidTag: "Manifest tag did not match URI", ErrorCodeUnverifiedManifest: "Manifest failed signature validation", ErrorCodeUnknownLayer: "Referenced layer not available", + ErrorCodeUnknownLayerUpload: "cannot resume unknown layer upload", ErrorCodeUntrustedSignature: "Manifest signed by untrusted source", } @@ -136,7 +133,7 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { - Code ErrorCode `json:"code,omitempty"` + Code ErrorCode `json:"code"` Message string `json:"message,omitempty"` Detail interface{} `json:"detail,omitempty"` } @@ -144,7 +141,7 @@ type Error struct { // Error returns a human readable representation of the error. func (e Error) Error() string { return fmt.Sprintf("%s: %s", - strings.Title(strings.Replace(e.Code.String(), "_", " ", -1)), + strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), e.Message) } @@ -167,6 +164,10 @@ func (errs *Errors) Push(code ErrorCode, details ...interface{}) { detail = details[0] } + if err, ok := detail.(error); ok { + detail = err.Error() + } + errs.PushErr(Error{ Code: code, Message: code.Message(), @@ -180,7 +181,7 @@ func (errs *Errors) PushErr(err error) { } func (errs *Errors) Error() string { - switch len(errs.Errors) { + switch errs.Len() { case 0: return "" case 1: @@ -194,6 +195,16 @@ func (errs *Errors) Error() string { } } +// Clear clears the errors. +func (errs *Errors) Clear() { + errs.Errors = errs.Errors[:0] +} + +// Len returns the current number of errors. +func (errs *Errors) Len() int { + return len(errs.Errors) +} + // DetailUnknownLayer provides detail for unknown layer errors, returned by // image manifest push for layers that are not yet transferred. This intended // to only be used on the backend to return detail for this specific error. 
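One behavioral note on Push worth calling out: details that implement error
are now flattened to their message string before serialization, so arbitrary
Go errors encode cleanly. A rough sketch of the resulting wire format (the
JSON shape is an expectation, mirrored by the updated test below):

// Sketch only: accumulate an error with an error-typed detail.
var errs Errors
errs.Push(ErrorCodeUnknown, fmt.Errorf("disk full")) // detail flattened to "disk full"

p, _ := json.Marshal(errs)
fmt.Println(string(p))
// {"errors":[{"code":"UNKNOWN","message":"unknown error","detail":"disk full"}]}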
diff --git a/errors_test.go b/errors_test.go
index e6ec72f9..709b6ced 100644
--- a/errors_test.go
+++ b/errors_test.go
@@ -56,7 +56,7 @@ func TestErrorCodes(t *testing.T) {
 func TestErrorsManagement(t *testing.T) {
 	var errs Errors
 
-	errs.Push(ErrorCodeInvalidChecksum)
+	errs.Push(ErrorCodeInvalidDigest)
 
 	var detail DetailUnknownLayer
 	detail.Unknown.BlobSum = "sometestblobsumdoesntmatter"
@@ -69,7 +69,20 @@ func TestErrorsManagement(t *testing.T) {
 		t.Fatalf("error marshaling errors: %v", err)
 	}
 
-	expectedJSON := "{\"errors\":[{\"code\":\"INVALID_CHECKSUM\",\"message\":\"provided checksum did not match uploaded content\"},{\"code\":\"UNKNOWN_LAYER\",\"message\":\"Referenced layer not available\",\"detail\":{\"unknown\":{\"blobSum\":\"sometestblobsumdoesntmatter\"}}}]}"
+	expectedJSON := "{\"errors\":[{\"code\":\"INVALID_DIGEST\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"UNKNOWN_LAYER\",\"message\":\"Referenced layer not available\",\"detail\":{\"unknown\":{\"blobSum\":\"sometestblobsumdoesntmatter\"}}}]}"
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+
+	errs.Clear()
+	errs.Push(ErrorCodeUnknown)
+	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
+	p, err = json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marshaling errors: %v", err)
+	}
 
 	if string(p) != expectedJSON {
 		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)

From e158e3cd65e4236d758883477fab83fa4fbc7ca9 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 20 Nov 2014 19:57:01 -0800
Subject: [PATCH 064/165] Initial implementation of Layer API

The http API has its first set of endpoints to implement the core aspects
of fetching and uploading layers. Uploads can be started and completed in
a single chunk and the content can be fetched via tarsum. Most proposed
error conditions should be represented but edge cases likely remain.

In this version, note that the layers are still called layers, even though
the routes are pointing to blobs. This will change with backend refactoring
over the next few weeks.

The unit tests are a bit of a shambles but these need to be carefully
written along with the core specification process. As the client-server
interaction solidifies, we can port this into a verification suite for
registry providers.

---
 api_test.go | 236 +++++++++++++++++++++++++++++++++++++++++++++++++
 app.go | 49 +++++++++-
 context.go | 9 +-
 helpers.go | 21 +++++
 layer.go | 54 +++++++++--
 layerupload.go | 191 +++++++++++++++++++++++++++++++++++----
 6 files changed, 528 insertions(+), 32 deletions(-)
 create mode 100644 api_test.go

diff --git a/api_test.go b/api_test.go
new file mode 100644
index 00000000..c850f141
--- /dev/null
+++ b/api_test.go
@@ -0,0 +1,236 @@
+package registry
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"testing"
+
+	"github.com/Sirupsen/logrus"
+	_ "github.com/docker/docker-registry/storagedriver/inmemory"
+
+	"github.com/gorilla/handlers"
+
+	"github.com/docker/docker-registry/common/testutil"
+	"github.com/docker/docker-registry/configuration"
+	"github.com/docker/docker-registry/digest"
+)
+
+// TestLayerAPI conducts a full test of the layer api.
+func TestLayerAPI(t *testing.T) {
+	// TODO(stevvooe): This test code is complete junk but it should cover the
+	// complete flow. This must be broken down and checked against the
+	// specification *before* we submit the final to docker core.
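+	//
+	// The flow exercised below: fetch and head a non-existent layer
+	// (expecting 404), start an upload (202 with a Location header),
+	// complete it with a single monolithic PUT (201), then head and fetch
+	// the layer back, verifying the body against its digest.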
+ + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + + app := NewApp(config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + router := v2APIRouter() + + u, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("error parsing server url: %v", err) + } + + imageName := "foo/bar" + // "build" our layer file + layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random layer file: %v", err) + } + + layerDigest := digest.Digest(tarSumStr) + + // ----------------------------------- + // Test fetch for non-existent content + r, err := router.GetRoute(routeNameBlob).Host(u.Host). + URL("name", imageName, + "digest", tarSumStr) + + resp, err := http.Get(r.String()) + if err != nil { + t.Fatalf("unexpected error fetching non-existent layer: %v", err) + } + + switch resp.StatusCode { + case http.StatusNotFound: + break // expected + default: + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status fetching non-existent layer: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("unexpected status fetching non-existent layer: %v, %v", resp.StatusCode, resp.Status) + } + + // ------------------------------------------ + // Test head request for non-existent content + resp, err = http.Head(r.String()) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + switch resp.StatusCode { + case http.StatusNotFound: + break // expected + default: + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status checking head on non-existent layer: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("unexpected status checking head on non-existent layer: %v, %v", resp.StatusCode, resp.Status) + } + + // ------------------------------------------ + // Upload a layer + r, err = router.GetRoute(routeNameBlobUpload).Host(u.Host). + URL("name", imageName) + if err != nil { + t.Fatalf("error starting layer upload: %v", err) + } + + resp, err = http.Post(r.String(), "", nil) + if err != nil { + t.Fatalf("error starting layer upload: %v", err) + } + + if resp.StatusCode != http.StatusAccepted { + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status starting layer upload: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("unexpected status starting layer upload: %v, %v", resp.StatusCode, resp.Status) + } + + if resp.Header.Get("Location") == "" { // TODO(stevvooe): Need better check here. + t.Fatalf("unexpected Location: %q != %q", resp.Header.Get("Location"), "foo") + } + + if resp.Header.Get("Content-Length") != "0" { + t.Fatalf("unexpected content-length: %q != %q", resp.Header.Get("Content-Length"), "0") + } + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerFile.Seek(0, os.SEEK_SET) + + uploadURLStr := resp.Header.Get("Location") + + // TODO(sday): Cancel the layer upload here and restart. 
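+	// Completing a monolithic upload: PUT to the returned upload location
+	// with the digest and length supplied as query parameters and the
+	// layer content as the request body; the registry responds 201 Created
+	// on success.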
+ + query := url.Values{ + "digest": []string{layerDigest.String()}, + "length": []string{fmt.Sprint(layerLength)}, + } + + uploadURL, err := url.Parse(uploadURLStr) + if err != nil { + t.Fatalf("unexpected error parsing url: %v", err) + } + + uploadURL.RawQuery = query.Encode() + + // Just do a monolithic upload + req, err := http.NewRequest("PUT", uploadURL.String(), layerFile) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error doing put: %v", err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + break // expected + default: + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status putting chunk: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("unexpected status putting chunk: %v, %v", resp.StatusCode, resp.Status) + } + + if resp.Header.Get("Location") == "" { + t.Fatalf("unexpected Location: %q", resp.Header.Get("Location")) + } + + if resp.Header.Get("Content-Length") != "0" { + t.Fatalf("unexpected content-length: %q != %q", resp.Header.Get("Content-Length"), "0") + } + + layerURL := resp.Header.Get("Location") + + // ------------------------ + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on non-existent layer: %v", err) + } + + switch resp.StatusCode { + case http.StatusOK: + break // expected + default: + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status checking head on layer: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("unexpected status checking head on layer: %v, %v", resp.StatusCode, resp.Status) + } + + logrus.Infof("fetch the layer") + // ---------------- + // Fetch the layer! + resp, err = http.Get(layerURL) + if err != nil { + t.Fatalf("unexpected error fetching layer: %v", err) + } + + switch resp.StatusCode { + case http.StatusOK: + break // expected + default: + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status fetching layer: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("unexpected status fetching layer: %v, %v", resp.StatusCode, resp.Status) + } + + // Verify the body + verifier := digest.NewDigestVerifier(layerDigest) + io.Copy(verifier, resp.Body) + + if !verifier.Verified() { + d, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Fatalf("unexpected status checking head on layer ayo!: %v, %v", resp.StatusCode, resp.Status) + } + + t.Logf("response:\n%s", string(d)) + t.Fatalf("response body did not pass verification") + } +} diff --git a/app.go b/app.go index bc7df554..25bf572d 100644 --- a/app.go +++ b/app.go @@ -3,7 +3,11 @@ package registry import ( "net/http" + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/factory" + "github.com/docker/docker-registry/configuration" + "github.com/docker/docker-registry/storage" log "github.com/Sirupsen/logrus" "github.com/gorilla/mux" @@ -16,6 +20,12 @@ type App struct { Config configuration.Configuration router *mux.Router + + // driver maintains the app global storage driver instance. + driver storagedriver.StorageDriver + + // services contains the main services instance for the application. 
+ services *storage.Services } // NewApp takes a configuration and returns a configured app, ready to serve @@ -29,11 +39,23 @@ func NewApp(configuration configuration.Configuration) *App { // Register the handler dispatchers. app.register(routeNameImageManifest, imageManifestDispatcher) - app.register(routeNameBlob, layerDispatcher) app.register(routeNameTags, tagsDispatcher) + app.register(routeNameBlob, layerDispatcher) app.register(routeNameBlobUpload, layerUploadDispatcher) app.register(routeNameBlobUploadResume, layerUploadDispatcher) + driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) + + if err != nil { + // TODO(stevvooe): Move the creation of a service into a protected + // method, where this is created lazily. Its status can be queried via + // a health check. + panic(err) + } + + app.driver = driver + app.services = storage.NewServices(app.driver) + return app } @@ -64,6 +86,22 @@ type dispatchFunc func(ctx *Context, r *http.Request) http.Handler // TODO(stevvooe): dispatchers should probably have some validation error // chain with proper error reporting. +// singleStatusResponseWriter only allows the first status to be written to be +// the valid request status. The current use case of this class should be +// factored out. +type singleStatusResponseWriter struct { + http.ResponseWriter + status int +} + +func (ssrw *singleStatusResponseWriter) WriteHeader(status int) { + if ssrw.status != 0 { + return + } + ssrw.status = status + ssrw.ResponseWriter.WriteHeader(status) +} + // dispatcher returns a handler that constructs a request specific context and // handler, using the dispatch factory function. func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { @@ -80,14 +118,17 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.log = log.WithField("name", context.Name) handler := dispatch(context, r) + ssrw := &singleStatusResponseWriter{ResponseWriter: w} context.log.Infoln("handler", resolveHandlerName(r.Method, handler)) - handler.ServeHTTP(w, r) + handler.ServeHTTP(ssrw, r) // Automated error response handling here. Handlers may return their // own errors if they need different behavior (such as range errors // for layer upload). - if len(context.Errors.Errors) > 0 { - w.WriteHeader(http.StatusBadRequest) + if context.Errors.Len() > 0 { + if ssrw.status == 0 { + w.WriteHeader(http.StatusBadRequest) + } serveJSON(w, context.Errors) } }) diff --git a/context.go b/context.go index a5706b4e..c246d6ac 100644 --- a/context.go +++ b/context.go @@ -1,8 +1,6 @@ package registry -import ( - "github.com/Sirupsen/logrus" -) +import "github.com/Sirupsen/logrus" // Context should contain the request specific context for use in across // handlers. Resources that don't need to be shared across handlers should not @@ -20,11 +18,6 @@ type Context struct { // handler *must not* start the response via http.ResponseWriter. Errors Errors - // TODO(stevvooe): Context would be a good place to create a - // representation of the "authorized resource". Perhaps, rather than - // having fields like "name", the context should be a set of parameters - // then we do routing from there. - // vars contains the extracted gorilla/mux variables that can be used for // assignment. 
vars map[string]string diff --git a/helpers.go b/helpers.go index b3b9d744..7714d029 100644 --- a/helpers.go +++ b/helpers.go @@ -2,7 +2,10 @@ package registry import ( "encoding/json" + "io" "net/http" + + "github.com/gorilla/mux" ) // serveJSON marshals v and sets the content-type header to @@ -18,3 +21,21 @@ func serveJSON(w http.ResponseWriter, v interface{}) error { return nil } + +// closeResources closes all the provided resources after running the target +// handler. +func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, closer := range closers { + defer closer.Close() + } + handler.ServeHTTP(w, r) + }) +} + +// clondedRoute returns a clone of the named route from the router. +func clonedRoute(router *mux.Router, name string) *mux.Route { + route := new(mux.Route) + *route = *router.GetRoute(name) // clone the route + return route +} diff --git a/layer.go b/layer.go index 82a1e6d9..38fdfe39 100644 --- a/layer.go +++ b/layer.go @@ -3,17 +3,28 @@ package registry import ( "net/http" + "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" + "github.com/gorilla/mux" ) // layerDispatcher uses the request context to build a layerHandler. func layerDispatcher(ctx *Context, r *http.Request) http.Handler { - layerHandler := &layerHandler{ - Context: ctx, - TarSum: ctx.vars["tarsum"], + dgst, err := digest.ParseDigest(ctx.vars["digest"]) + + if err != nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx.Errors.Push(ErrorCodeInvalidDigest, err) + }) } - layerHandler.log = layerHandler.log.WithField("tarsum", layerHandler.TarSum) + layerHandler := &layerHandler{ + Context: ctx, + Digest: dgst, + } + + layerHandler.log = layerHandler.log.WithField("digest", dgst) return handlers.MethodHandler{ "GET": http.HandlerFunc(layerHandler.GetLayer), @@ -25,11 +36,44 @@ func layerDispatcher(ctx *Context, r *http.Request) http.Handler { type layerHandler struct { *Context - TarSum string + Digest digest.Digest } // GetLayer fetches the binary data from backend storage returns it in the // response. func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { + layers := lh.services.Layers() + layer, err := layers.Fetch(lh.Name, lh.Digest) + + if err != nil { + switch err { + case storage.ErrLayerUnknown: + w.WriteHeader(http.StatusNotFound) + lh.Errors.Push(ErrorCodeUnknownLayer, + map[string]interface{}{ + "unknown": FSLayer{BlobSum: lh.Digest}, + }) + return + default: + lh.Errors.Push(ErrorCodeUnknown, err) + return + } + } + defer layer.Close() + + http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer) +} + +func buildLayerURL(router *mux.Router, r *http.Request, layer storage.Layer) (string, error) { + route := clonedRoute(router, routeNameBlob) + + layerURL, err := route.Schemes(r.URL.Scheme).Host(r.Host). 
+ URL("name", layer.Name(), + "digest", layer.Digest().String()) + if err != nil { + return "", err + } + + return layerURL.String(), nil } diff --git a/layerupload.go b/layerupload.go index 8916b552..d1ec4206 100644 --- a/layerupload.go +++ b/layerupload.go @@ -1,64 +1,225 @@ package registry import ( + "fmt" + "io" "net/http" + "strconv" + "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" + "github.com/gorilla/mux" ) // layerUploadDispatcher constructs and returns the layer upload handler for // the given request context. func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { - layerUploadHandler := &layerUploadHandler{ + luh := &layerUploadHandler{ Context: ctx, - TarSum: ctx.vars["tarsum"], UUID: ctx.vars["uuid"], } - layerUploadHandler.log = layerUploadHandler.log.WithField("tarsum", layerUploadHandler.TarSum) + handler := http.Handler(handlers.MethodHandler{ + "POST": http.HandlerFunc(luh.StartLayerUpload), + "GET": http.HandlerFunc(luh.GetUploadStatus), + "HEAD": http.HandlerFunc(luh.GetUploadStatus), + "PUT": http.HandlerFunc(luh.PutLayerChunk), + "DELETE": http.HandlerFunc(luh.CancelLayerUpload), + }) - if layerUploadHandler.UUID != "" { - layerUploadHandler.log = layerUploadHandler.log.WithField("uuid", layerUploadHandler.UUID) + if luh.UUID != "" { + luh.log = luh.log.WithField("uuid", luh.UUID) + + layers := ctx.services.Layers() + upload, err := layers.Resume(luh.UUID) + + if err != nil && err != storage.ErrLayerUploadUnknown { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logrus.Infof("error resolving upload: %v", err) + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(ErrorCodeUnknown, err) + }) + } + + luh.Upload = upload + handler = closeResources(handler, luh.Upload) } - return handlers.MethodHandler{ - "POST": http.HandlerFunc(layerUploadHandler.StartLayerUpload), - "GET": http.HandlerFunc(layerUploadHandler.GetUploadStatus), - "HEAD": http.HandlerFunc(layerUploadHandler.GetUploadStatus), - "PUT": http.HandlerFunc(layerUploadHandler.PutLayerChunk), - "DELETE": http.HandlerFunc(layerUploadHandler.CancelLayerUpload), - } + return handler } // layerUploadHandler handles the http layer upload process. type layerUploadHandler struct { *Context - // TarSum is the unique identifier of the layer being uploaded. - TarSum string - // UUID identifies the upload instance for the current request. UUID string + + Upload storage.LayerUpload } // StartLayerUpload begins the layer upload process and allocates a server- // side upload session. func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.Request) { + layers := luh.services.Layers() + upload, err := layers.Upload(luh.Name) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(ErrorCodeUnknown, err) + return + } + luh.Upload = upload + defer luh.Upload.Close() + + if err := luh.layerUploadResponse(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(ErrorCodeUnknown, err) + return + } + w.WriteHeader(http.StatusAccepted) } // GetUploadStatus returns the status of a given upload, identified by uuid. 
func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(ErrorCodeUnknownLayerUpload) + } + if err := luh.layerUploadResponse(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(ErrorCodeUnknown, err) + return + } + + w.WriteHeader(http.StatusNoContent) } // PutLayerChunk receives a layer chunk during the layer upload process, // possible completing the upload with a checksum and length. func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(ErrorCodeUnknownLayerUpload) + } + var finished bool + + // TODO(stevvooe): This is woefully incomplete. Missing stuff: + // + // 1. Extract information from range header, if present. + // 2. Check offset of current layer. + // 3. Emit correct error responses. + + // Read in the chunk + io.Copy(luh.Upload, r.Body) + + if err := luh.maybeCompleteUpload(w, r); err != nil { + if err != errNotReadyToComplete { + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(ErrorCodeUnknown, err) + return + } + } + + if err := luh.layerUploadResponse(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) // Error conditions here? + luh.Errors.Push(ErrorCodeUnknown, err) + return + } + + if finished { + w.WriteHeader(http.StatusCreated) + } else { + w.WriteHeader(http.StatusAccepted) + } } // CancelLayerUpload cancels an in-progress upload of a layer. func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) { + if luh.Upload == nil { + w.WriteHeader(http.StatusNotFound) + luh.Errors.Push(ErrorCodeUnknownLayerUpload) + } } + +// layerUploadResponse provides a standard request for uploading layers and +// chunk responses. This sets the correct headers but the response status is +// left to the caller. +func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error { + uploadURL, err := buildLayerUploadURL(luh.router, r, luh.Upload) + if err != nil { + logrus.Infof("error building upload url: %s", err) + return err + } + + w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Range", fmt.Sprintf("0-%d", luh.Upload.Offset())) + + return nil +} + +var errNotReadyToComplete = fmt.Errorf("not ready to complete upload") + +// maybeCompleteUpload tries to complete the upload if the correct parameters +// are available. Returns errNotReadyToComplete if not ready to complete. +func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error { + // If we get a digest and length, we can finish the upload. + dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! + sizeStr := r.FormValue("length") + + if dgstStr == "" || sizeStr == "" { + return errNotReadyToComplete + } + + dgst, err := digest.ParseDigest(dgstStr) + if err != nil { + return err + } + + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return err + } + + luh.completeUpload(w, r, size, dgst) + return nil +} + +// completeUpload finishes out the upload with the correct response. 
+func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) { + layer, err := luh.Upload.Finish(size, dgst) + if err != nil { + luh.Errors.Push(ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + layerURL, err := buildLayerURL(luh.router, r, layer) + if err != nil { + luh.Errors.Push(ErrorCodeUnknown, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set("Location", layerURL) + w.Header().Set("Content-Length", "0") + w.WriteHeader(http.StatusCreated) +} + +func buildLayerUploadURL(router *mux.Router, r *http.Request, upload storage.LayerUpload) (string, error) { + route := clonedRoute(router, routeNameBlobUploadResume) + + uploadURL, err := route.Schemes(r.URL.Scheme).Host(r.Host). + URL("name", upload.Name(), "uuid", upload.UUID()) + if err != nil { + return "", err + } + + return uploadURL.String(), nil +} From 4bbabc6e36006c165106ab4d7ffb342ec9263883 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 21 Nov 2014 17:04:35 -0800 Subject: [PATCH 065/165] Implement path spec for manifest storage --- storage/paths.go | 28 +++++++++++++++++++++------- storage/paths_test.go | 7 +++++++ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/storage/paths.go b/storage/paths.go index 18aef17e..87c0b2fd 100644 --- a/storage/paths.go +++ b/storage/paths.go @@ -24,7 +24,7 @@ const storagePathVersion = "v2" // /v2 // -> repositories/ // ->/ -// -> images/ +// -> manifests/ // // -> layers/ // -> tarsum/ @@ -48,6 +48,7 @@ const storagePathVersion = "v2" // // We cover the path formats implemented by this path mapper below. // +// manifestPathSpec: /v2/repositories//manifests/ // layerLinkPathSpec: /v2/repositories//layers/tarsum/// // layerIndexLinkPathSpec: /v2/layerindex/tarsum/// // blobPathSpec: /v2/blob/sha256// @@ -84,7 +85,13 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { // to an intermediate path object, than can be consumed and mapped by the // other version. + rootPrefix := []string{pm.root, pm.version} + repoPrefix := append(rootPrefix, "repositories") + switch v := spec.(type) { + case manifestPathSpec: + // TODO(sday): May need to store manifest by architecture. + return path.Join(append(repoPrefix, v.name, "manifests", v.tag)...), nil case layerLinkPathSpec: if !strings.HasPrefix(v.digest.Algorithm(), "tarsum") { // Only tarsum is supported, for now @@ -101,9 +108,8 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return "", err } - p := path.Join(append([]string{pm.root, pm.version, "repositories", v.name, "layers"}, tarSumInfoPathComponents(tsi)...)...) - - return p, nil + return path.Join(append(append(repoPrefix, v.name, "layers"), + tarSumInfoPathComponents(tsi)...)...), nil case layerIndexLinkPathSpec: if !strings.HasPrefix(v.digest.Algorithm(), "tarsum") { // Only tarsum is supported, for now @@ -120,9 +126,8 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { return "", err } - p := path.Join(append([]string{pm.root, pm.version, "layerindex"}, tarSumInfoPathComponents(tsi)...)...) - - return p, nil + return path.Join(append(append(rootPrefix, "layerindex"), + tarSumInfoPathComponents(tsi)...)...), nil case blobPathSpec: p := path.Join([]string{pm.root, pm.version, "blob", v.alg, v.digest[:2], v.digest}...) return p, nil @@ -139,6 +144,15 @@ type pathSpec interface { pathSpec() } +// manifestPathSpec describes the path elements used to build a manifest path. 
+// The contents should be a signed manifest json file.
+type manifestPathSpec struct {
+	name string
+	tag  string
+}
+
+func (manifestPathSpec) pathSpec() {}
+
 // layerLink specifies a path for a layer link, which is a file with a blob
 // id. The layer link will contain a content addressable blob id reference
 // into the blob store. The format of the contents is as follows:

diff --git a/storage/paths_test.go b/storage/paths_test.go
index 5dc4c07c..d2ff542f 100644
--- a/storage/paths_test.go
+++ b/storage/paths_test.go
@@ -16,6 +16,13 @@ func TestPathMapper(t *testing.T) {
 		expected string
 		err      error
 	}{
+		{
+			spec: manifestPathSpec{
+				name: "foo/bar",
+				tag:  "thetag",
+			},
+			expected: "/pathmapper-test/repositories/foo/bar/manifests/thetag",
+		},
 		{
 			spec: layerLinkPathSpec{
 				name: "foo/bar",

From eaadb82e1e45547726d53c8c36a34efdc6015024 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 21 Nov 2014 19:29:08 -0800
Subject: [PATCH 066/165] Move Manifest type into storage package

This changeset moves the Manifest type into the storage package to make
the type accessible to client and registry without import cycles. The
structure of the manifest was also changed to accurately reflect the
stages of the signing process.

A straw man Manifest.Sign method has been added to start testing this
concept out but will probably be accompanied by the more important
SignedManifest.Verify method as the security model develops.

This is probably the start of a concerted effort to consolidate types
across the client and server portions of the code base but we may want
to see how such a handy type, like the Manifest and SignedManifest,
would work in docker core.

---
 client/client.go | 11 ++--
 client/client_test.go | 58 +++++++++++---------
 client/objectstore.go | 12 ++--
 client/pull.go | 4 +-
 client/push.go | 6 +-
 errors.go | 2 +-
 images.go | 67 +---------------------
 layer.go | 2 +-
 storage/manifest.go | 125 ++++++++++++++++++++++++++++++++++++++++++
 9 files changed, 179 insertions(+), 108 deletions(-)
 create mode 100644 storage/manifest.go

diff --git a/client/client.go b/client/client.go
index 944050e0..e51476cd 100644
--- a/client/client.go
+++ b/client/client.go
@@ -12,17 +12,18 @@ import (
 
 	"github.com/docker/docker-registry"
 	"github.com/docker/docker-registry/digest"
+	"github.com/docker/docker-registry/storage"
 )
 
 // Client implements the client interface to the registry http api
 type Client interface {
 	// GetImageManifest returns an image manifest for the image at the given
 	// name, tag pair.
-	GetImageManifest(name, tag string) (*registry.ImageManifest, error)
+	GetImageManifest(name, tag string) (*storage.SignedManifest, error)
 
 	// PutImageManifest uploads an image manifest for the image at the given
 	// name, tag pair.
-	PutImageManifest(name, tag string, imageManifest *registry.ImageManifest) error
+	PutImageManifest(name, tag string, imageManifest *storage.SignedManifest) error
 
 	// DeleteImage removes the image at the given name, tag pair.
DeleteImage(name, tag string) error @@ -81,7 +82,7 @@ type clientImpl struct { // TODO(bbland): use consistent route generation between server and client -func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest, error) { +func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest, error) { response, err := http.Get(r.imageManifestURL(name, tag)) if err != nil { return nil, err @@ -108,7 +109,7 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest decoder := json.NewDecoder(response.Body) - manifest := new(registry.ImageManifest) + manifest := new(storage.SignedManifest) err = decoder.Decode(manifest) if err != nil { return nil, err @@ -116,7 +117,7 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*registry.ImageManifest return manifest, nil } -func (r *clientImpl) PutImageManifest(name, tag string, manifest *registry.ImageManifest) error { +func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.SignedManifest) error { manifestBytes, err := json.Marshal(manifest) if err != nil { return err diff --git a/client/client_test.go b/client/client_test.go index a77e7665..dc75789d 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -9,9 +9,9 @@ import ( "sync" "testing" - "github.com/docker/docker-registry" "github.com/docker/docker-registry/common/testutil" "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storage" ) type testBlob struct { @@ -33,8 +33,8 @@ func TestPush(t *testing.T) { }, } uploadLocations := make([]string, len(testBlobs)) - blobs := make([]registry.FSLayer, len(testBlobs)) - history := make([]registry.ManifestHistory, len(testBlobs)) + blobs := make([]storage.FSLayer, len(testBlobs)) + history := make([]storage.ManifestHistory, len(testBlobs)) for i, blob := range testBlobs { // TODO(bbland): this is returning the same location for all uploads, @@ -42,17 +42,21 @@ func TestPush(t *testing.T) { // It's sort of okay because we're using unique digests, but this needs // to change at some point. 
uploadLocations[i] = fmt.Sprintf("/v2/%s/blob/test-uuid", name) - blobs[i] = registry.FSLayer{BlobSum: blob.digest} - history[i] = registry.ManifestHistory{V1Compatibility: blob.digest.String()} + blobs[i] = storage.FSLayer{BlobSum: blob.digest} + history[i] = storage.ManifestHistory{V1Compatibility: blob.digest.String()} } - manifest := ®istry.ImageManifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - SchemaVersion: 1, + manifest := &storage.SignedManifest{ + Manifest: storage.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: storage.Versioned{ + SchemaVersion: 1, + }, + }, } manifestBytes, err := json.Marshal(manifest) @@ -102,7 +106,7 @@ func TestPush(t *testing.T) { client := New(server.URL) objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), - manifestStorage: make(map[string]*registry.ImageManifest), + manifestStorage: make(map[string]*storage.SignedManifest), layerStorage: make(map[digest.Digest]Layer), } @@ -142,21 +146,25 @@ func TestPull(t *testing.T) { contents: []byte("some other contents"), }, } - blobs := make([]registry.FSLayer, len(testBlobs)) - history := make([]registry.ManifestHistory, len(testBlobs)) + blobs := make([]storage.FSLayer, len(testBlobs)) + history := make([]storage.ManifestHistory, len(testBlobs)) for i, blob := range testBlobs { - blobs[i] = registry.FSLayer{BlobSum: blob.digest} - history[i] = registry.ManifestHistory{V1Compatibility: blob.digest.String()} + blobs[i] = storage.FSLayer{BlobSum: blob.digest} + history[i] = storage.ManifestHistory{V1Compatibility: blob.digest.String()} } - manifest := ®istry.ImageManifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: blobs, - History: history, - SchemaVersion: 1, + manifest := &storage.SignedManifest{ + Manifest: storage.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: blobs, + History: history, + Versioned: storage.Versioned{ + SchemaVersion: 1, + }, + }, } manifestBytes, err := json.Marshal(manifest) @@ -190,7 +198,7 @@ func TestPull(t *testing.T) { client := New(server.URL) objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), - manifestStorage: make(map[string]*registry.ImageManifest), + manifestStorage: make(map[string]*storage.SignedManifest), layerStorage: make(map[digest.Digest]Layer), } diff --git a/client/objectstore.go b/client/objectstore.go index bee73ff0..177f9aca 100644 --- a/client/objectstore.go +++ b/client/objectstore.go @@ -8,8 +8,8 @@ import ( "io/ioutil" "sync" - "github.com/docker/docker-registry" "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storage" ) var ( @@ -28,11 +28,11 @@ var ( type ObjectStore interface { // Manifest retrieves the image manifest stored at the given repository name // and tag - Manifest(name, tag string) (*registry.ImageManifest, error) + Manifest(name, tag string) (*storage.SignedManifest, error) // WriteManifest stores an image manifest at the given repository name and // tag - WriteManifest(name, tag string, manifest *registry.ImageManifest) error + WriteManifest(name, tag string, manifest *storage.SignedManifest) error // Layer returns a handle to a layer for reading and writing Layer(dgst digest.Digest) (Layer, error) @@ -56,11 +56,11 @@ type Layer interface { // memoryObjectStore is an in-memory implementation of the ObjectStore interface type memoryObjectStore struct { mutex *sync.Mutex - manifestStorage map[string]*registry.ImageManifest + manifestStorage 
map[string]*storage.SignedManifest layerStorage map[digest.Digest]Layer } -func (objStore *memoryObjectStore) Manifest(name, tag string) (*registry.ImageManifest, error) { +func (objStore *memoryObjectStore) Manifest(name, tag string) (*storage.SignedManifest, error) { objStore.mutex.Lock() defer objStore.mutex.Unlock() @@ -71,7 +71,7 @@ func (objStore *memoryObjectStore) Manifest(name, tag string) (*registry.ImageMa return manifest, nil } -func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *registry.ImageManifest) error { +func (objStore *memoryObjectStore) WriteManifest(name, tag string, manifest *storage.SignedManifest) error { objStore.mutex.Lock() defer objStore.mutex.Unlock() diff --git a/client/pull.go b/client/pull.go index bce06756..435e40b9 100644 --- a/client/pull.go +++ b/client/pull.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - "github.com/docker/docker-registry" + "github.com/docker/docker-registry/storage" log "github.com/Sirupsen/logrus" ) @@ -77,7 +77,7 @@ func Pull(c Client, objectStore ObjectStore, name, tag string) error { return nil } -func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry.FSLayer) error { +func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.FSLayer) error { log.WithField("layer", fsLayer).Info("Pulling layer") layer, err := objectStore.Layer(fsLayer.BlobSum) diff --git a/client/push.go b/client/push.go index 08726058..c0ff10d1 100644 --- a/client/push.go +++ b/client/push.go @@ -5,7 +5,7 @@ import ( "io" "io/ioutil" - "github.com/docker/docker-registry" + "github.com/docker/docker-registry/storage" log "github.com/Sirupsen/logrus" ) @@ -15,7 +15,7 @@ import ( // push window has been successfully pushed. const simultaneousLayerPushWindow = 4 -type pushFunction func(fsLayer registry.FSLayer) error +type pushFunction func(fsLayer storage.FSLayer) error // Push implements a client push workflow for the image defined by the given // name and tag pair, using the given ObjectStore for local manifest and layer @@ -74,7 +74,7 @@ func Push(c Client, objectStore ObjectStore, name, tag string) error { return nil } -func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer registry.FSLayer) error { +func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.FSLayer) error { log.WithField("layer", fsLayer).Info("Pushing layer") layer, err := objectStore.Layer(fsLayer.BlobSum) diff --git a/errors.go b/errors.go index e2f16ba0..b6170430 100644 --- a/errors.go +++ b/errors.go @@ -212,7 +212,7 @@ type DetailUnknownLayer struct { // Unknown should contain the contents of a layer descriptor, which is a // single FSLayer currently. 
- Unknown FSLayer `json:"unknown"` + Unknown storage.FSLayer `json:"unknown"` } // RepositoryNotFoundError is returned when making an operation against a diff --git a/images.go b/images.go index 534069b2..317651e2 100644 --- a/images.go +++ b/images.go @@ -2,76 +2,13 @@ package registry import ( "encoding/json" + "fmt" "net/http" - "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" ) -// ImageManifest defines the structure of an image manifest -type ImageManifest struct { - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []ManifestHistory `json:"history"` - - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // Raw is the byte representation of the ImageManifest, used for signature - // verification - Raw []byte `json:"-"` -} - -// imageManifest is used to avoid recursion in unmarshaling -type imageManifest ImageManifest - -// UnmarshalJSON populates a new ImageManifest struct from JSON data. -func (m *ImageManifest) UnmarshalJSON(b []byte) error { - var manifest imageManifest - err := json.Unmarshal(b, &manifest) - if err != nil { - return err - } - - *m = ImageManifest(manifest) - m.Raw = b - return nil -} - -// FSLayer is a container struct for BlobSums defined in an image manifest -type FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// ManifestHistory stores unstructured v1 compatibility information -type ManifestHistory struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} - -// Checksum is a container struct for an image checksum -type Checksum struct { - // HashAlgorithm is the algorithm used to compute the checksum - // Supported values: md5, sha1, sha256, sha512 - HashAlgorithm string - - // Sum is the actual checksum value for the given HashAlgorithm - Sum string -} - // imageManifestDispatcher takes the request context and builds the // appropriate handler for handling image manifest requests. func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { diff --git a/layer.go b/layer.go index 38fdfe39..5e1c6f45 100644 --- a/layer.go +++ b/layer.go @@ -52,7 +52,7 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) lh.Errors.Push(ErrorCodeUnknownLayer, map[string]interface{}{ - "unknown": FSLayer{BlobSum: lh.Digest}, + "unknown": storage.FSLayer{BlobSum: lh.Digest}, }) return default: diff --git a/storage/manifest.go b/storage/manifest.go new file mode 100644 index 00000000..9921fbea --- /dev/null +++ b/storage/manifest.go @@ -0,0 +1,125 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/libtrust" + + "github.com/docker/docker-registry/digest" +) + +var ( + // ErrManifestUnknown is returned if the manifest is not known by the + // registry. 
+ ErrManifestUnknown = fmt.Errorf("unknown manifest") + + // ErrManifestUnverified is returned when the registry is unable to verify + // the manifest. + ErrManifestUnverified = fmt.Errorf("unverified manifest") +) + +// Versioned provides a struct with just the manifest schemaVersion. Incoming +// content with unknown schema version can be decoded against this struct to +// check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. +type Manifest struct { + Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []ManifestHistory `json:"history"` +} + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func (m *Manifest) Sign(pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.Marshal(m) + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + Raw: pretty, + }, nil +} + +// SignedManifest provides an envelope for +type SignedManifest struct { + Manifest + + // Raw is the byte representation of the ImageManifest, used for signature + // verification. The manifest byte representation cannot change or it will + // have to be re-signed. + Raw []byte `json:"-"` +} + +// UnmarshalJSON populates a new ImageManifest struct from JSON data. +func (m *SignedManifest) UnmarshalJSON(b []byte) error { + var manifest Manifest + if err := json.Unmarshal(b, &manifest); err != nil { + return err + } + + m.Manifest = manifest + m.Raw = b + + return nil +} + +// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner +// contents. +func (m *SignedManifest) MarshalJSON() ([]byte, error) { + if len(m.Raw) > 0 { + return m.Raw, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&m.Manifest) +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// ManifestHistory stores unstructured v1 compatibility information +type ManifestHistory struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} From ebab275b76545d05c4c488965a7b146ac10c4855 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Fri, 21 Nov 2014 14:24:22 -0800 Subject: [PATCH 067/165] Fix goverall Also simplified things a bit + adding notes about extra tooling. 
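Before the CI changes below, it is worth illustrating the signing flow introduced in storage/manifest.go above. The following program is an editorial sketch, not part of any patch: the repository name, tag, and blob sum are placeholder values, and the import paths assume the layout used throughout this series. It signs a manifest and shows that the signature verifies again after a round trip through the Raw bytes.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/docker/libtrust"

    	"github.com/docker/docker-registry/storage"
    )

    func main() {
    	m := storage.Manifest{
    		Versioned: storage.Versioned{SchemaVersion: 1},
    		Name:      "library/hello", // placeholder repository name
    		Tag:       "latest",
    		FSLayers:  []storage.FSLayer{{BlobSum: "tarsum.v1+sha256:abcdef"}},
    	}

    	pk, err := libtrust.GenerateECP256PrivateKey()
    	if err != nil {
    		panic(err)
    	}

    	sm, err := m.Sign(pk)
    	if err != nil {
    		panic(err)
    	}

    	// Persist sm.Raw verbatim: re-marshaling through json.Marshal compacts
    	// whitespace, which would invalidate the embedded "pretty" signature.
    	var decoded storage.SignedManifest
    	if err := json.Unmarshal(sm.Raw, &decoded); err != nil {
    		panic(err)
    	}

    	// The signature block can be re-parsed and verified from the
    	// round-tripped bytes, much as manifest verification does below.
    	js, err := libtrust.ParsePrettySignature(decoded.Raw, "signatures")
    	if err != nil {
    		panic(err)
    	}

    	keys, err := js.Verify()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("verified; signed by", keys[0].KeyID())
    }

This is also why SignedManifest carries Raw alongside the parsed fields: the byte representation cannot change without re-signing.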
--- .drone.yml | 24 +++++++++++++++++++----- project/dev-image/Dockerfile | 3 ++- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/.drone.yml b/.drone.yml index 60ea41da..d943e19f 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,14 +1,28 @@ image: dmp42/go:stable script: + # To be spoofed back into the test image + - go get github.com/modocache/gover + - go get -t ./... - - FAIL=$(find ./ -iname "*.go" -exec gofmt -s -l {} \;) && echo "$FAIL" && test -z "$FAIL" + # Go fmt + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + # Go lint + - test -z "$(golint ./... | tee /dev/stderr)" + # Go vet - go vet ./... - - FAIL=$(golint ./...) && echo "$FAIL" && test -z "$FAIL" - - go test -v ./... - - goveralls -v -service drone.io -repotoken {{COVERALLS_TOKEN}} - # - go build --tags SOMETAG + # Go test + - go test -v -race -cover ./... + # Helper to concatenate reports + - gover + # Send to coveralls + - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} + + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" + # http://labix.org/gocheck notify: email: diff --git a/project/dev-image/Dockerfile b/project/dev-image/Dockerfile index f77af95c..017c1a21 100644 @@ -6,6 +6,7 @@ ENV GOROOT /usr/local/go ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin ENV LANG C +ENV LC_ALL C RUN apt-get update && apt-get install -y \ wget ca-certificates git mercurial bzr \ @@ -16,4 +17,4 @@ RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ rm go${GOLANG_VERSION}.linux-amd64.tar.gz -RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint \ No newline at end of file +RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint From 4635a1e903c8e551a20ac04b1c311457fdb4ca7e Mon Sep 17 00:00:00 2001 From: Anton Tiurin Date: Sat, 22 Nov 2014 23:23:03 +0300 Subject: [PATCH 068/165] [IPC] Tiny cleaning --- storagedriver/ipc/client.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index 08f7b800..c77797eb 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -423,7 +423,6 @@ func (driver *StorageDriverClient) handleSubprocessExit() { func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, response interface{}) error { receiveChan := make(chan error, 1) go func(receiver libchan.Receiver, receiveChan chan<- error) { - defer close(receiveChan) receiveChan <- receiver.Receive(response) }(receiver, receiveChan) @@ -432,9 +431,6 @@ func (driver *StorageDriverClient) receiveResponse(receiver libchan.Receiver, re select { case err = <-receiveChan: case err, ok = <-driver.exitChan: - go func(receiveChan <-chan error) { - <-receiveChan - }(receiveChan) if !ok { err = driver.exitErr } From 4decfaa82e6e3b9754b436910670d75b4eca77e7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 21 Nov 2014 19:39:52 -0800 Subject: [PATCH 069/165] Initial implementation of image manifest storage This change implements the first pass at image manifest storage on top of the storagedriver. While very similar to LayerService, it is much simpler due to the lower complexity of pushing and pulling images.
Various components are still missing, such as detailed error reporting on missing layers during verification, but the base functionality is present. --- errors.go | 1 + images.go | 3 - storage/layer.go | 17 ----- storage/manifest_test.go | 139 +++++++++++++++++++++++++++++++++++++++ storage/manifeststore.go | 134 +++++++++++++++++++++++++++++++++++++ storage/services.go | 40 +++++++++++ 6 files changed, 314 insertions(+), 20 deletions(-) create mode 100644 storage/manifest_test.go create mode 100644 storage/manifeststore.go diff --git a/errors.go b/errors.go index b6170430..113097dd 100644 --- a/errors.go +++ b/errors.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storage" ) // ErrorCode represents the error type. The errors are serialized via strings diff --git a/images.go b/images.go index 317651e2..f16a3560 100644 --- a/images.go +++ b/images.go @@ -1,11 +1,8 @@ package registry import ( - "encoding/json" - "fmt" "net/http" - "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" ) diff --git a/storage/layer.go b/storage/layer.go index d2ddfb07..dc6b3422 100644 --- a/storage/layer.go +++ b/storage/layer.go @@ -8,23 +8,6 @@ import ( "github.com/docker/docker-registry/digest" ) -// LayerService provides operations on layer files in a backend storage. -type LayerService interface { - // Exists returns true if the layer exists. - Exists(name string, digest digest.Digest) (bool, error) - - // Fetch the layer identifed by TarSum. - Fetch(name string, digest digest.Digest) (Layer, error) - - // Upload begins a layer upload to repository identified by name, - // returning a handle. - Upload(name string) (LayerUpload, error) - - // Resume continues an in progress layer upload, returning the current - // state of the upload. - Resume(uuid string) (LayerUpload, error) -} - // Layer provides a readable and seekable layer object. Typically, // implementations are *not* goroutine safe. 
type Layer interface { diff --git a/storage/manifest_test.go b/storage/manifest_test.go new file mode 100644 index 00000000..c96c1dec --- /dev/null +++ b/storage/manifest_test.go @@ -0,0 +1,139 @@ +package storage + +import ( + "reflect" + "testing" + + "github.com/docker/libtrust" + + "github.com/docker/docker-registry/digest" + "github.com/docker/docker-registry/storagedriver/inmemory" +) + +func TestManifestStorage(t *testing.T) { + driver := inmemory.New() + ms := &manifestStore{ + driver: driver, + pathMapper: &pathMapper{ + root: "/storage/testing", + version: storagePathVersion, + }, + layerService: newMockedLayerService(), + } + + name := "foo/bar" + tag := "thetag" + + exists, err := ms.Exists(name, tag) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %v", err) + } + + if exists { + t.Fatalf("manifest should not exist") + } + + if _, err := ms.Get(name, tag); err != ErrManifestUnknown { + t.Fatalf("expected manifest unknown error: %v != %v", err, ErrManifestUnknown) + } + + manifest := Manifest{ + Versioned: Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + FSLayers: []FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + sm, err := manifest.Sign(pk) + if err != nil { + t.Fatalf("error signing manifest: %v", err) + } + + err = ms.Put(name, tag, sm) + if err == nil { + t.Fatalf("expected errors putting manifest") + } + + // TODO(stevvooe): We expect errors describing all of the missing layers. + + ms.layerService.(*mockedExistenceLayerService).add(name, "asdf") + ms.layerService.(*mockedExistenceLayerService).add(name, "qwer") + + if err = ms.Put(name, tag, sm); err != nil { + t.Fatalf("unexpected error putting manifest: %v", err) + } + + exists, err = ms.Exists(name, tag) + if err != nil { + t.Fatalf("unexpected error checking manifest existence: %v", err) + } + + if !exists { + t.Fatalf("manifest should exist") + } + + fetchedManifest, err := ms.Get(name, tag) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + + if !reflect.DeepEqual(fetchedManifest, sm) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) + } +} + +type layerKey struct { + name string + digest digest.Digest +} + +type mockedExistenceLayerService struct { + exists map[layerKey]struct{} +} + +func newMockedLayerService() *mockedExistenceLayerService { + return &mockedExistenceLayerService{ + exists: make(map[layerKey]struct{}), + } +} + +var _ LayerService = &mockedExistenceLayerService{} + +func (mels *mockedExistenceLayerService) add(name string, digest digest.Digest) { + mels.exists[layerKey{name: name, digest: digest}] = struct{}{} +} + +func (mels *mockedExistenceLayerService) remove(name string, digest digest.Digest) { + delete(mels.exists, layerKey{name: name, digest: digest}) +} + +func (mels *mockedExistenceLayerService) Exists(name string, digest digest.Digest) (bool, error) { + _, ok := mels.exists[layerKey{name: name, digest: digest}] + return ok, nil +} + +func (mockedExistenceLayerService) Fetch(name string, digest digest.Digest) (Layer, error) { + panic("not implemented") +} + +func (mockedExistenceLayerService) Upload(name string) (LayerUpload, error) { + panic("not implemented") +} + +func (mockedExistenceLayerService) Resume(uuid string) (LayerUpload, error) { + panic("not implemented") +} diff --git a/storage/manifeststore.go 
b/storage/manifeststore.go new file mode 100644 index 00000000..1b76c8c0 --- /dev/null +++ b/storage/manifeststore.go @@ -0,0 +1,134 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/libtrust" + + "github.com/docker/docker-registry/storagedriver" +) + +type manifestStore struct { + driver storagedriver.StorageDriver + pathMapper *pathMapper + layerService LayerService +} + +var _ ManifestService = &manifestStore{} + +func (ms *manifestStore) Exists(name, tag string) (bool, error) { + p, err := ms.path(name, tag) + if err != nil { + return false, err + } + + size, err := ms.driver.CurrentSize(p) + if err != nil { + return false, err + } + + if size == 0 { + return false, nil + } + + return true, nil +} + +func (ms *manifestStore) Get(name, tag string) (*SignedManifest, error) { + p, err := ms.path(name, tag) + if err != nil { + return nil, err + } + + content, err := ms.driver.GetContent(p) + if err != nil { + switch err := err.(type) { + case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError: + return nil, ErrManifestUnknown + default: + return nil, err + } + } + + var manifest SignedManifest + + if err := json.Unmarshal(content, &manifest); err != nil { + // TODO(stevvooe): Corrupted manifest error? + return nil, err + } + + // TODO(stevvooe): Verify the manifest here? + + return &manifest, nil +} + +func (ms *manifestStore) Put(name, tag string, manifest *SignedManifest) error { + p, err := ms.path(name, tag) + if err != nil { + return err + } + + if err := ms.verifyManifest(name, tag, manifest); err != nil { + return err + } + + // TODO(stevvooe): Should we get manifest first? + + return ms.driver.PutContent(p, manifest.Raw) +} + +func (ms *manifestStore) Delete(name, tag string) error { + panic("not implemented") +} + +func (ms *manifestStore) path(name, tag string) (string, error) { + return ms.pathMapper.path(manifestPathSpec{ + name: name, + tag: tag, + }) +} + +func (ms *manifestStore) verifyManifest(name, tag string, manifest *SignedManifest) error { + if manifest.Name != name { + return fmt.Errorf("name does not match manifest name") + } + + if manifest.Tag != tag { + return fmt.Errorf("tag does not match manifest tag") + } + + var errs []error + + for _, fsLayer := range manifest.FSLayers { + exists, err := ms.layerService.Exists(name, fsLayer.BlobSum) + if err != nil { + // TODO(stevvooe): Need to store information about missing blob. + errs = append(errs, err) + } + + if !exists { + errs = append(errs, fmt.Errorf("missing layer %v", fsLayer.BlobSum)) + } + } + + if len(errs) != 0 { + // TODO(stevvooe): These need to be recoverable by a caller. + return fmt.Errorf("missing layers: %v", errs) + } + + js, err := libtrust.ParsePrettySignature(manifest.Raw, "signatures") + if err != nil { + return err + } + + _, err = js.Verify() // These pubkeys need to be checked. + if err != nil { + return err + } + + // TODO(sday): Pubkey checks need to go here. This where things get fancy. + // Perhaps, an injected service would reduce coupling here. 
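// One hypothetical shape for those pubkey checks (editorial sketch; the
// trustedKeys allow-list below is an assumed, injected field that does not
// exist on manifestStore in this patch):
//
//	pubKeys, err := js.Verify()
//	if err != nil {
//		return err
//	}
//	for _, pk := range pubKeys {
//		if _, ok := ms.trustedKeys[pk.KeyID()]; !ok {
//			return fmt.Errorf("manifest signed by untrusted key %q", pk.KeyID())
//		}
//	}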
+ + return nil +} diff --git a/storage/services.go b/storage/services.go index afb26d94..1f6d5e51 100644 --- a/storage/services.go +++ b/storage/services.go @@ -1,6 +1,7 @@ package storage import ( + "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storagedriver" ) @@ -41,3 +42,42 @@ func NewServices(driver storagedriver.StorageDriver) *Services { func (ss *Services) Layers() LayerService { return &layerStore{driver: ss.driver, pathMapper: ss.pathMapper, uploadStore: ss.layerUploadStore} } + +// Manifests returns an instance of ManifestService. Instantiation is cheap and +// may be context sensitive in the future. The instance should be used similar +// to a request local. +func (ss *Services) Manifests() ManifestService { + return &manifestStore{driver: ss.driver, pathMapper: ss.pathMapper, layerService: ss.Layers()} +} + +// ManifestService provides operations on image manifests. +type ManifestService interface { + // Exists returns true if the layer exists. + Exists(name, tag string) (bool, error) + + // Get retrieves the named manifest, if it exists. + Get(name, tag string) (*SignedManifest, error) + + // Put creates or updates the named manifest. + Put(name, tag string, manifest *SignedManifest) error + + // Delete removes the named manifest, if it exists. + Delete(name, tag string) error +} + +// LayerService provides operations on layer files in a backend storage. +type LayerService interface { + // Exists returns true if the layer exists. + Exists(name string, digest digest.Digest) (bool, error) + + // Fetch the layer identifed by TarSum. + Fetch(name string, digest digest.Digest) (Layer, error) + + // Upload begins a layer upload to repository identified by name, + // returning a handle. + Upload(name string) (LayerUpload, error) + + // Resume continues an in progress layer upload, returning the current + // state of the upload. 
+ Resume(uuid string) (LayerUpload, error) +} From 50d64ac63a41f6d9f29fa5d3e3b82c32ed7732a1 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 18 Nov 2014 15:44:39 -0800 Subject: [PATCH 070/165] Allows layers to be partially pulled and resumed Adds a sort of contrived test for resumable pulls --- client/client_test.go | 131 ++++++++++++++++++++++++++++++++++++ client/objectstore.go | 150 +++++++++++++++++++++++++++++++----------- client/pull.go | 30 ++++++--- client/push.go | 19 +++--- 4 files changed, 271 insertions(+), 59 deletions(-) diff --git a/client/client_test.go b/client/client_test.go index a77e7665..267f5a5b 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -117,6 +117,7 @@ func TestPush(t *testing.T) { t.Fatal(err) } + writer.SetSize(len(blob.contents)) writer.Write(blob.contents) writer.Close() } @@ -235,3 +236,133 @@ func TestPull(t *testing.T) { } } } + +func TestPullResume(t *testing.T) { + name := "hello/world" + tag := "sometag" + testBlobs := []testBlob{ + { + digest: "12345", + contents: []byte("some contents"), + }, + { + digest: "98765", + contents: []byte("some other contents"), + }, + } + layers := make([]registry.FSLayer, len(testBlobs)) + history := make([]registry.ManifestHistory, len(testBlobs)) + + for i, layer := range testBlobs { + layers[i] = registry.FSLayer{BlobSum: layer.digest} + history[i] = registry.ManifestHistory{V1Compatibility: layer.digest.String()} + } + + manifest := ®istry.ImageManifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: layers, + History: history, + SchemaVersion: 1, + } + manifestBytes, err := json.Marshal(manifest) + + layerRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) + for i, blob := range testBlobs { + layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/blob/" + blob.digest.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: blob.contents[:len(blob.contents)/2], + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(blob.contents))}, + }), + }, + } + layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/blob/" + blob.digest.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: blob.contents[len(blob.contents)/2:], + }, + } + } + + for i := 0; i < 3; i++ { + layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMap{ + testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + name + "/manifest/" + tag, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: manifestBytes, + }, + }, + }...) 
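// Editorial note on the protocol this test exercises: the first mapping for
// each blob answers with the full Content-Length but only half the body, so a
// pull attempt leaves the local layer writer partially filled. A retried
// pullLayer (see client/pull.go below) then resumes from the current offset,
// roughly:
//
//	offset := layerWriter.CurrentSize()
//	layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, offset)
//	// ...
//	layerWriter.SetSize(offset + length)
//	_, err = io.Copy(layerWriter, layerReader)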
+ } + + handler := testutil.NewHandler(layerRequestResponseMappings) + server := httptest.NewServer(handler) + client := New(server.URL) + objectStore := &memoryObjectStore{ + mutex: new(sync.Mutex), + manifestStorage: make(map[string]*registry.ImageManifest), + layerStorage: make(map[digest.Digest]Layer), + } + + for attempts := 0; attempts < 3; attempts++ { + err = Pull(client, objectStore, name, tag) + if err == nil { + break + } + } + + if err != nil { + t.Fatal(err) + } + + m, err := objectStore.Manifest(name, tag) + if err != nil { + t.Fatal(err) + } + + mBytes, err := json.Marshal(m) + if err != nil { + t.Fatal(err) + } + + if string(mBytes) != string(manifestBytes) { + t.Fatal("Incorrect manifest") + } + + for _, blob := range testBlobs { + l, err := objectStore.Layer(blob.digest) + if err != nil { + t.Fatal(err) + } + + reader, err := l.Reader() + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + layerBytes, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + + if string(layerBytes) != string(blob.contents) { + t.Fatal("Incorrect blob") + } + } +} diff --git a/client/objectstore.go b/client/objectstore.go index bee73ff0..2e6f0b45 100644 --- a/client/objectstore.go +++ b/client/objectstore.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "sync" "github.com/docker/docker-registry" @@ -39,20 +38,49 @@ type ObjectStore interface { } // Layer is a generic image layer interface. -// A Layer may only be written to once +// A Layer may not be written to if it is already complete. type Layer interface { - // Reader returns an io.ReadCloser which reads the contents of the layer - Reader() (io.ReadCloser, error) + // Reader returns a LayerReader or an error if the layer has not been + // written to or is currently being written to. + Reader() (LayerReader, error) - // Writer returns an io.WriteCloser which may write the contents of the - // layer. This method may only be called once per Layer, and the contents - // are made available on Close - Writer() (io.WriteCloser, error) + // Writer returns a LayerWriter or an error if the layer has been fully + // written to or is currently being written to. + Writer() (LayerWriter, error) - // Wait blocks until the Layer can be read from + // Wait blocks until the Layer can be read from. Wait() error } +// LayerReader is a read-only handle to a Layer, which exposes the CurrentSize +// and full Size in addition to implementing the io.ReadCloser interface. +type LayerReader interface { + io.ReadCloser + + // CurrentSize returns the number of bytes written to the underlying Layer + CurrentSize() int + + // Size returns the full size of the underlying Layer + Size() int +} + +// LayerWriter is a write-only handle to a Layer, which exposes the CurrentSize +// and full Size in addition to implementing the io.WriteCloser interface. +// SetSize must be called on this LayerWriter before it can be written to. +type LayerWriter interface { + io.WriteCloser + + // CurrentSize returns the number of bytes written to the underlying Layer + CurrentSize() int + + // Size returns the full size of the underlying Layer + Size() int + + // SetSize sets the full size of the underlying Layer. 
+ // This must be called before any calls to Write + SetSize(int) error +} + // memoryObjectStore is an in-memory implementation of the ObjectStore interface type memoryObjectStore struct { mutex *sync.Mutex @@ -93,67 +121,113 @@ func (objStore *memoryObjectStore) Layer(dgst digest.Digest) (Layer, error) { } type memoryLayer struct { - cond *sync.Cond - buffer *bytes.Buffer - written bool + cond *sync.Cond + contents []byte + expectedSize int + writing bool } -func (ml *memoryLayer) Writer() (io.WriteCloser, error) { +func (ml *memoryLayer) Reader() (LayerReader, error) { ml.cond.L.Lock() defer ml.cond.L.Unlock() - if ml.buffer != nil { - if !ml.written { - return nil, ErrLayerLocked - } - return nil, ErrLayerAlreadyExists - } - - ml.buffer = new(bytes.Buffer) - return &memoryLayerWriter{cond: ml.cond, buffer: ml.buffer, done: &ml.written}, nil -} - -func (ml *memoryLayer) Reader() (io.ReadCloser, error) { - ml.cond.L.Lock() - defer ml.cond.L.Unlock() - - if ml.buffer == nil { + if ml.contents == nil { return nil, fmt.Errorf("Layer has not been written to yet") } - if !ml.written { + if ml.writing { return nil, ErrLayerLocked } - return ioutil.NopCloser(bytes.NewReader(ml.buffer.Bytes())), nil + return &memoryLayerReader{ml: ml, reader: bytes.NewReader(ml.contents)}, nil +} + +func (ml *memoryLayer) Writer() (LayerWriter, error) { + ml.cond.L.Lock() + defer ml.cond.L.Unlock() + + if ml.contents != nil { + if ml.writing { + return nil, ErrLayerLocked + } + if ml.expectedSize == len(ml.contents) { + return nil, ErrLayerAlreadyExists + } + } else { + ml.contents = make([]byte, 0) + } + + ml.writing = true + return &memoryLayerWriter{ml: ml, buffer: bytes.NewBuffer(ml.contents)}, nil } func (ml *memoryLayer) Wait() error { ml.cond.L.Lock() defer ml.cond.L.Unlock() - if ml.buffer == nil { + if ml.contents == nil { return fmt.Errorf("No writer to wait on") } - for !ml.written { + for ml.writing { ml.cond.Wait() } return nil } +type memoryLayerReader struct { + ml *memoryLayer + reader *bytes.Reader +} + +func (mlr *memoryLayerReader) Read(p []byte) (int, error) { + return mlr.reader.Read(p) +} + +func (mlr *memoryLayerReader) Close() error { + return nil +} + +func (mlr *memoryLayerReader) CurrentSize() int { + return len(mlr.ml.contents) +} + +func (mlr *memoryLayerReader) Size() int { + return mlr.ml.expectedSize +} + type memoryLayerWriter struct { - cond *sync.Cond + ml *memoryLayer buffer *bytes.Buffer - done *bool } func (mlw *memoryLayerWriter) Write(p []byte) (int, error) { - return mlw.buffer.Write(p) + if mlw.ml.expectedSize == 0 { + return 0, fmt.Errorf("Must set size before writing to layer") + } + wrote, err := mlw.buffer.Write(p) + mlw.ml.contents = mlw.buffer.Bytes() + return wrote, err } func (mlw *memoryLayerWriter) Close() error { - *mlw.done = true - mlw.cond.Broadcast() + mlw.ml.writing = false + mlw.ml.cond.Broadcast() + return nil +} + +func (mlw *memoryLayerWriter) CurrentSize() int { + return len(mlw.ml.contents) +} + +func (mlw *memoryLayerWriter) Size() int { + return mlw.ml.expectedSize +} + +func (mlw *memoryLayerWriter) SetSize(size int) error { + if !mlw.ml.writing { + return fmt.Errorf("Layer is closed for writing") + } + mlw.ml.expectedSize = size return nil } diff --git a/client/pull.go b/client/pull.go index bce06756..5d7ee56f 100644 --- a/client/pull.go +++ b/client/pull.go @@ -89,7 +89,7 @@ func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. 
return err } - writer, err := layer.Writer() + layerWriter, err := layer.Writer() if err == ErrLayerAlreadyExists { log.WithField("layer", fsLayer).Info("Layer already exists") return nil @@ -106,9 +106,17 @@ func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. }).Warn("Unable to write local layer") return err } - defer writer.Close() + defer layerWriter.Close() - layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, 0) + if layerWriter.CurrentSize() > 0 { + log.WithFields(log.Fields{ + "layer": fsLayer, + "currentSize": layerWriter.CurrentSize(), + "size": layerWriter.Size(), + }).Info("Layer partially downloaded, resuming") + } + + layerReader, length, err := c.GetBlob(name, fsLayer.BlobSum, layerWriter.CurrentSize()) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -118,7 +126,9 @@ func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. } defer layerReader.Close() - copied, err := io.Copy(writer, layerReader) + layerWriter.SetSize(layerWriter.CurrentSize() + length) + + _, err = io.Copy(layerWriter, layerReader) if err != nil { log.WithFields(log.Fields{ "error": err, @@ -126,15 +136,15 @@ func pullLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. }).Warn("Unable to download layer") return err } - if copied != int64(length) { + if layerWriter.CurrentSize() != layerWriter.Size() { log.WithFields(log.Fields{ - "expected": length, - "written": copied, - "layer": fsLayer, - }).Warn("Wrote incorrect number of bytes for layer") + "size": layerWriter.Size(), + "currentSize": layerWriter.CurrentSize(), + "layer": fsLayer, + }).Warn("Layer invalid size") return fmt.Errorf( "Wrote incorrect number of bytes for layer %v. Expected %d, Wrote %d", - fsLayer, length, copied, + fsLayer, layerWriter.Size(), layerWriter.CurrentSize(), ) } return nil diff --git a/client/push.go b/client/push.go index 08726058..fae5cc10 100644 --- a/client/push.go +++ b/client/push.go @@ -1,9 +1,7 @@ package client import ( - "bytes" - "io" - "io/ioutil" + "errors" "github.com/docker/docker-registry" @@ -96,14 +94,13 @@ func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. } defer layerReader.Close() - layerBuffer := new(bytes.Buffer) - layerSize, err := io.Copy(layerBuffer, layerReader) - if err != nil { + if layerReader.CurrentSize() != layerReader.Size() { log.WithFields(log.Fields{ - "error": err, - "layer": fsLayer, - }).Warn("Unable to read local layer") - return err + "layer": fsLayer, + "currentSize": layerReader.CurrentSize(), + "size": layerReader.Size(), + }).Warn("Local layer incomplete") + return errors.New("Local layer incomplete") } length, err := c.BlobLength(name, fsLayer.BlobSum) @@ -128,7 +125,7 @@ func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer registry. 
return err } - err = c.UploadBlob(location, ioutil.NopCloser(layerBuffer), int(layerSize), fsLayer.BlobSum) + err = c.UploadBlob(location, layerReader, int(layerReader.CurrentSize()), fsLayer.BlobSum) if err != nil { log.WithFields(log.Fields{ "error": err, From 66d2a13aec5364c384f38d9878327508c3c12b76 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 24 Nov 2014 16:38:33 -0800 Subject: [PATCH 071/165] Correct references after move of Manifest into storage package --- client/client_test.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/client/client_test.go b/client/client_test.go index 93f01adb..57578c81 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -258,21 +258,23 @@ func TestPullResume(t *testing.T) { contents: []byte("some other contents"), }, } - layers := make([]registry.FSLayer, len(testBlobs)) - history := make([]registry.ManifestHistory, len(testBlobs)) + layers := make([]storage.FSLayer, len(testBlobs)) + history := make([]storage.ManifestHistory, len(testBlobs)) for i, layer := range testBlobs { - layers[i] = registry.FSLayer{BlobSum: layer.digest} - history[i] = registry.ManifestHistory{V1Compatibility: layer.digest.String()} + layers[i] = storage.FSLayer{BlobSum: layer.digest} + history[i] = storage.ManifestHistory{V1Compatibility: layer.digest.String()} } - manifest := ®istry.ImageManifest{ - Name: name, - Tag: tag, - Architecture: "x86", - FSLayers: layers, - History: history, - SchemaVersion: 1, + manifest := &storage.Manifest{ + Name: name, + Tag: tag, + Architecture: "x86", + FSLayers: layers, + History: history, + Versioned: storage.Versioned{ + SchemaVersion: 1, + }, } manifestBytes, err := json.Marshal(manifest) @@ -323,7 +325,7 @@ func TestPullResume(t *testing.T) { client := New(server.URL) objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), - manifestStorage: make(map[string]*registry.ImageManifest), + manifestStorage: make(map[string]*storage.SignedManifest), layerStorage: make(map[digest.Digest]Layer), } From 68944ea9cfb76a6bc2b6889a4a1c00e0b3a0ac14 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 24 Nov 2014 16:21:02 -0800 Subject: [PATCH 072/165] Clean up layer storage layout Previously, discussions were still ongoing about different storage layouts that could support various access models. This changeset removes a layer of indirection that was in place due to earlier designs. Effectively, this both associates a layer with a named repository and ensures that content cannot be accessed across repositories. It also moves to rely on tarsum as a true content-addressable identifier, removing a layer of indirection during blob resolution. --- digest/digest.go | 59 ++++++++------- storage/layer_test.go | 13 +--- storage/layerstore.go | 106 +++++++-------------------- storage/layerupload.go | 62 ++-------------- storage/paths.go | 158 +++++++++++++++++++---------------------- storage/paths_test.go | 25 ++++--- 6 files changed, 153 insertions(+), 270 deletions(-) diff --git a/digest/digest.go b/digest/digest.go index cbd0ab6b..6a3fdfd5 100644 --- a/digest/digest.go +++ b/digest/digest.go @@ -47,32 +47,9 @@ var ( // ParseDigest parses s and returns the validated digest object. An error will // be returned if the format is invalid. 
func ParseDigest(s string) (Digest, error) { - // Common case will be tarsum - _, err := common.ParseTarSum(s) - if err == nil { - return Digest(s), nil - } + d := Digest(s) - // Continue on for general parser - - i := strings.Index(s, ":") - if i < 0 { - return "", ErrDigestInvalidFormat - } - - // case: "sha256:" with no hex. - if i+1 == len(s) { - return "", ErrDigestInvalidFormat - } - - switch s[:i] { - case "md5", "sha1", "sha256": - break - default: - return "", ErrDigestUnsupported - } - - return Digest(s), nil + return d, d.Validate() } // FromReader returns the most valid digest for the underlying content. @@ -119,6 +96,38 @@ func FromBytes(p []byte) (Digest, error) { return FromReader(bytes.NewReader(p)) } +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + // Common case will be tarsum + _, err := common.ParseTarSum(s) + if err == nil { + return nil + } + + // Continue on for general parser + + i := strings.Index(s, ":") + if i < 0 { + return ErrDigestInvalidFormat + } + + // case: "sha256:" with no hex. + if i+1 == len(s) { + return ErrDigestInvalidFormat + } + + switch s[:i] { + case "md5", "sha1", "sha256": + break + default: + return ErrDigestUnsupported + } + + return nil +} + // Algorithm returns the algorithm portion of the digest. This will panic if // the underlying digest is not in a valid format. func (d Digest) Algorithm() string { diff --git a/storage/layer_test.go b/storage/layer_test.go index 03cba9b9..f04115e7 100644 --- a/storage/layer_test.go +++ b/storage/layer_test.go @@ -304,18 +304,13 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, blobDigestSHA := digest.NewDigest("sha256", h) blobPath, err := pathMapper.path(blobPathSpec{ - alg: blobDigestSHA.Algorithm(), - digest: blobDigestSHA.Hex(), + digest: dgst, }) if err := driver.PutContent(blobPath, p); err != nil { return "", err } - layerIndexLinkPath, err := pathMapper.path(layerIndexLinkPathSpec{ - digest: dgst, - }) - if err != nil { return "", err } @@ -329,11 +324,7 @@ func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, return "", err } - if err := driver.PutContent(layerLinkPath, []byte(blobDigestSHA.String())); err != nil { - return "", nil - } - - if err = driver.PutContent(layerIndexLinkPath, []byte(name)); err != nil { + if err := driver.PutContent(layerLinkPath, []byte(dgst)); err != nil { return "", nil } diff --git a/storage/layerstore.go b/storage/layerstore.go index 6abd50e3..2544ec4f 100644 --- a/storage/layerstore.go +++ b/storage/layerstore.go @@ -1,11 +1,8 @@ package storage import ( - "fmt" - "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storagedriver" ) @@ -33,31 +30,17 @@ func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) { } func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) { - repos, err := ls.resolveContainingRepositories(digest) - + blobPath, err := ls.resolveBlobPath(name, digest) if err != nil { - // TODO(stevvooe): Unknown tarsum error: need to wrap. 
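// Editorial sketch of the digest refactor above: ParseDigest now simply wraps
// Validate, so callers already holding a digest value can check it directly.
// For example:
//
//	if _, err := digest.ParseDigest("sha512:deadbeef"); err != nil {
//		// ErrDigestUnsupported: only md5, sha1, sha256 and tarsums are accepted
//	}
//
//	d := digest.Digest("tarsum.v1+sha256:0123456789abcdef")
//	if err := d.Validate(); err == nil {
//		// well-formed: tarsum digests are recognized via common.ParseTarSum
//	}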
- return nil, err + switch err := err.(type) { + case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError: + return nil, ErrLayerUnknown + default: + return nil, err + } } - // TODO(stevvooe): Access control for layer pulls need to happen here: we - // have a list of repos that "own" the tarsum that need to be checked - // against the list of repos to which we have pull access. The argument - // repos needs to be filtered against that access list. - - _, blobPath, err := ls.resolveBlobPath(repos, digest) - - if err != nil { - // TODO(stevvooe): Map this error correctly, perhaps in the callee. - return nil, err - } - - p, err := ls.pathMapper.path(blobPath) - if err != nil { - return nil, err - } - - fr, err := newFileReader(ls.driver, p) + fr, err := newFileReader(ls.driver, blobPath) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError: @@ -117,69 +100,30 @@ func (ls *layerStore) newLayerUpload(lus LayerUploadState) LayerUpload { } } -func (ls *layerStore) resolveContainingRepositories(digest digest.Digest) ([]string, error) { - // Lookup the layer link in the index by tarsum id. - layerIndexLinkPath, err := ls.pathMapper.path(layerIndexLinkPathSpec{digest: digest}) +// resolveBlobId looks up the blob location in the repositories from a +// layer/blob link file, returning blob path or an error on failure. +func (ls *layerStore) resolveBlobPath(name string, dgst digest.Digest) (string, error) { + pathSpec := layerLinkPathSpec{name: name, digest: dgst} + layerLinkPath, err := ls.pathMapper.path(pathSpec) + if err != nil { - return nil, err + return "", err } - layerIndexLinkContent, err := ls.driver.GetContent(layerIndexLinkPath) + layerLinkContent, err := ls.driver.GetContent(layerLinkPath) if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - return nil, ErrLayerUnknown - default: - return nil, err - } + return "", err } - results := strings.Split(string(layerIndexLinkContent), "\n") + // NOTE(stevvooe): The content of the layer link should match the digest. + // This layer of indirection is for name-based content protection. - // clean these up - for i, result := range results { - results[i] = strings.TrimSpace(result) + linked, err := digest.ParseDigest(string(layerLinkContent)) + if err != nil { + return "", err } - return results, nil -} - -// resolveBlobId lookups up the tarSum in the various repos to find the blob -// link, returning the repo name and blob path spec or an error on failure. -func (ls *layerStore) resolveBlobPath(repos []string, digest digest.Digest) (name string, bps blobPathSpec, err error) { - - for _, repo := range repos { - pathSpec := layerLinkPathSpec{name: repo, digest: digest} - layerLinkPath, err := ls.pathMapper.path(pathSpec) - - if err != nil { - // TODO(stevvooe): This looks very lazy, may want to collect these - // errors and report them if we exit this for loop without - // resolving the blob id. - logrus.Debugf("error building linkLayerPath (%V): %v", pathSpec, err) - continue - } - - layerLinkContent, err := ls.driver.GetContent(layerLinkPath) - if err != nil { - logrus.Debugf("error getting layerLink content (%V): %v", pathSpec, err) - continue - } - - // Yay! We've resolved our blob id and we're ready to go. 
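// Editorial sketch of the simplified resolution above: the repository-scoped
// link file now stores the blob's own digest, so fetching a layer is two
// reads rather than a cross-repository search (paths per storage/paths_test.go
// below):
//
//	repositories/<name>/layers/tarsum/<ver>/<alg>/<hex[:2]>/<hex>  ->  "<digest>"
//	blob/tarsum/<ver>/<alg>/<hex[:2]>/<hex>                        ->  layer data
//
// Because the link lives under the repository name, a request can only reach
// blobs its own repository has linked, giving the name-based content
// protection mentioned in the comments.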
- parts := strings.SplitN(strings.TrimSpace(string(layerLinkContent)), ":", 2) - - if len(parts) != 2 { - return "", bps, fmt.Errorf("invalid blob reference: %q", string(layerLinkContent)) - } - - name = repo - bp := blobPathSpec{alg: parts[0], digest: parts[1]} - - return repo, bp, nil - } - - // TODO(stevvooe): Map this error to repo not found, but it basically - // means we exited the loop above without finding a blob link. - return "", bps, fmt.Errorf("unable to resolve blog id for repos=%v and digest=%v", repos, digest) + bp := blobPathSpec{digest: linked} + + return ls.pathMapper.path(bp) } diff --git a/storage/layerupload.go b/storage/layerupload.go index f134aa19..e0336a0f 100644 --- a/storage/layerupload.go +++ b/storage/layerupload.go @@ -6,8 +6,6 @@ import ( "io/ioutil" "os" "path/filepath" - "sort" - "strings" "code.google.com/p/go-uuid/uuid" @@ -285,10 +283,9 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d // writeLayer actually writes the the layer file into its final destination. // The layer should be validated before commencing the write. -func (luc *layerUploadController) writeLayer(fp layerFile, size int64, digest digest.Digest) error { +func (luc *layerUploadController) writeLayer(fp layerFile, size int64, dgst digest.Digest) error { blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{ - alg: digest.Algorithm(), - digest: digest.Hex(), + digest: dgst, }) if err != nil { @@ -324,8 +321,8 @@ func (luc *layerUploadController) writeLayer(fp layerFile, size int64, digest di return nil } -// linkLayer links a valid, written layer blog into the registry, first -// linking the repository namespace, then adding it to the layerindex. +// linkLayer links a valid, written layer blob into the registry under the +// named repository for the upload controller. func (luc *layerUploadController) linkLayer(digest digest.Digest) error { layerLinkPath, err := luc.layerStore.pathMapper.path(layerLinkPathSpec{ name: luc.Name(), @@ -336,56 +333,7 @@ func (luc *layerUploadController) linkLayer(digest digest.Digest) error { return err } - if err := luc.layerStore.driver.PutContent(layerLinkPath, []byte(digest)); err != nil { - return nil - } - - // Link the layer into the name index. - layerIndexLinkPath, err := luc.layerStore.pathMapper.path(layerIndexLinkPathSpec{ - digest: digest, - }) - - if err != nil { - return err - } - - // Read back the name index file. If it exists, create it. If not, add the - // new repo to the name list. - - // TODO(stevvooe): This is very racy, as well. Reconsider using list for - // this operation? - layerIndexLinkContent, err := luc.layerStore.driver.GetContent(layerIndexLinkPath) - if err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - layerIndexLinkContent = []byte(luc.Name()) - default: - return err - } - } - layerIndexLinkContent = luc.maybeAddNameToLayerIndexLinkContent(layerIndexLinkContent) - - // Write the index content back to the index. 
- return luc.layerStore.driver.PutContent(layerIndexLinkPath, layerIndexLinkContent) -} - -func (luc *layerUploadController) maybeAddNameToLayerIndexLinkContent(content []byte) []byte { - names := strings.Split(string(content), "\n") - var found bool - // Search the names and find ours - for _, name := range names { - if name == luc.Name() { - found = true - } - } - - if !found { - names = append(names, luc.Name()) - } - - sort.Strings(names) - - return []byte(strings.Join(names, "\n")) + return luc.layerStore.driver.PutContent(layerLinkPath, []byte(digest)) } // localFSLayerUploadStore implements a local layerUploadStore. There are some diff --git a/storage/paths.go b/storage/paths.go index 87c0b2fd..ecc3dd32 100644 --- a/storage/paths.go +++ b/storage/paths.go @@ -11,11 +11,6 @@ import ( const storagePathVersion = "v2" -// TODO(sday): This needs to be changed: all layers for an image will be -// linked under the repository. Lookup from tarsum to name is not necessary, -// so we can remove the layer index. For this to properly work, image push -// must link the images layers under the repo. - // pathMapper maps paths based on "object names" and their ids. The "object // names" mapped by pathMapper are internal to the storage system. // @@ -27,31 +22,21 @@ const storagePathVersion = "v2" // -> manifests/ // // -> layers/ -// -> tarsum/ -// -> / -// -> / -// -// -> layerindex/ -// -> tarsum/ -// -> / -// -> / -// -// -> blob/sha256 -// +// +// -> blob/ +// // // There are few important components to this path layout. First, we have the // repository store identified by name. This contains the image manifests and // a layer store with links to CAS blob ids. Outside of the named repo area, -// we have the layerindex, which provides lookup from tarsum id to repo -// storage. The blob store contains the actual layer data and any other data -// that can be referenced by a CAS id. +// we have the the blob store. It contains the actual layer data and any other +// data that can be referenced by a CAS id. // // We cover the path formats implemented by this path mapper below. // // manifestPathSpec: /v2/repositories//manifests/ // layerLinkPathSpec: /v2/repositories//layers/tarsum/// -// layerIndexLinkPathSpec: /v2/layerindex/tarsum/// -// blobPathSpec: /v2/blob/sha256// +// blobPathSpec: /v2/blob/// // // For more information on the semantic meaning of each path and their // contents, please see the path spec documentation. @@ -60,16 +45,6 @@ type pathMapper struct { version string // should be a constant? } -// TODO(stevvooe): This storage layout currently allows lookup to layer stores -// by repo name via the tarsum. The layer index lookup could come with an -// access control check against the link contents before proceeding. The main -// problem with this comes with a collision in the tarsum algorithm: if party -// A uploads a layer before party B, with an identical tarsum, party B may -// never be able to get access to the tarsum stored under party A. We'll need -// a way for party B to associate with a "unique" version of their image. This -// may be as simple as forcing the client to re-upload images to which they -// don't have access. - // path returns the path identified by spec. func (pm *pathMapper) path(spec pathSpec) (string, error) { @@ -93,44 +68,34 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) { // TODO(sday): May need to store manifest by architecture. 
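// For a concrete mapping (editorial example derived from the return below and
// the expectations in storage/paths_test.go): with a pathMapper rooted at
// "/pathmapper-test", manifestPathSpec{name: "foo/bar", tag: "thetag"} would
// yield:
//
//	/pathmapper-test/repositories/foo/bar/manifests/thetag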
return path.Join(append(repoPrefix, v.name, "manifests", v.tag)...), nil case layerLinkPathSpec: - if !strings.HasPrefix(v.digest.Algorithm(), "tarsum") { - // Only tarsum is supported, for now - return "", fmt.Errorf("unsupport content digest: %v", v.digest) - } - - tsi, err := common.ParseTarSum(v.digest.String()) - + components, err := digestPathComoponents(v.digest) if err != nil { - // TODO(sday): This will return an InvalidTarSumError from - // ParseTarSum but we may want to wrap this. This error should - // never be encountered in production, since the tarsum should be - // validated by this point. return "", err } - return path.Join(append(append(repoPrefix, v.name, "layers"), - tarSumInfoPathComponents(tsi)...)...), nil - case layerIndexLinkPathSpec: - if !strings.HasPrefix(v.digest.Algorithm(), "tarsum") { + // For now, only map tarsum paths. + if components[0] != "tarsum" { // Only tarsum is supported, for now - return "", fmt.Errorf("unsupport content digest: %v", v.digest) + return "", fmt.Errorf("unsupported content digest: %v", v.digest) } - tsi, err := common.ParseTarSum(v.digest.String()) + layerLinkPathComponents := append(repoPrefix, v.name, "layers") - if err != nil { - // TODO(sday): This will return an InvalidTarSumError from - // ParseTarSum but we may want to wrap this. This error should - // never be encountered in production, since the tarsum should be - // validated by this point. - return "", err - } - - return path.Join(append(append(rootPrefix, "layerindex"), - tarSumInfoPathComponents(tsi)...)...), nil + return path.Join(append(layerLinkPathComponents, components...)...), nil case blobPathSpec: - p := path.Join([]string{pm.root, pm.version, "blob", v.alg, v.digest[:2], v.digest}...) - return p, nil + components, err := digestPathComoponents(v.digest) + if err != nil { + return "", err + } + + // For now, only map tarsum paths. + if components[0] != "tarsum" { + // Only tarsum is supported, for now + return "", fmt.Errorf("unsupported content digest: %v", v.digest) + } + + blobPathPrefix := append(rootPrefix, "blob") + return path.Join(append(blobPathPrefix, components...)...), nil default: // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). return "", fmt.Errorf("unknown path spec: %#v", v) @@ -172,40 +137,61 @@ type layerLinkPathSpec struct { func (layerLinkPathSpec) pathSpec() {} -// layerIndexLinkPath provides a path to a registry global layer store, -// indexed by tarsum. The target file will contain the repo name of the -// "owner" of the layer. An example name link file follows: -// -// library/ubuntu -// foo/bar -// -// The above file has the tarsum stored under the foo/bar repository and the -// library/ubuntu repository. The storage layer should access the tarsum from -// the first repository to which the client has access. -type layerIndexLinkPathSpec struct { - digest digest.Digest -} - -func (layerIndexLinkPathSpec) pathSpec() {} +// blobAlgorithmReplacer does some very simple path sanitization for user +// input. Mostly, this is to provide some heirachry for tarsum digests. Paths +// should be "safe" before getting this far due to strict digest requirements +// but we can add further path conversion here, if needed. +var blobAlgorithmReplacer = strings.NewReplacer( + "+", "/", + ".", "/", + ";", "/", +) // blobPath contains the path for the registry global blob store. For now, // this contains layer data, exclusively. type blobPathSpec struct { - // TODO(stevvooe): Port this to make better use of Digest type. 
- alg string - digest string + digest digest.Digest } func (blobPathSpec) pathSpec() {} -// tarSumInfoPath generates storage path components for the provided -// TarSumInfo. -func tarSumInfoPathComponents(tsi common.TarSumInfo) []string { - version := tsi.Version - - if version == "" { - version = "v0" +// digestPathComoponents provides a consistent path breakdown for a given +// digest. For a generic digest, it will be as follows: +// +// // +// +// Most importantly, for tarsum, the layout looks like this: +// +// tarsum//// +// +// This is slightly specialized to store an extra version path for version 0 +// tarsums. +func digestPathComoponents(dgst digest.Digest) ([]string, error) { + if err := dgst.Validate(); err != nil { + return nil, err } - return []string{"tarsum", version, tsi.Algorithm, tsi.Digest} + algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm()) + hex := dgst.Hex() + prefix := []string{algorithm} + suffix := []string{ + hex[:2], // Breaks heirarchy up. + hex, + } + + if tsi, err := common.ParseTarSum(dgst.String()); err == nil { + // We have a tarsum! + version := tsi.Version + if version == "" { + version = "v0" + } + + prefix = []string{ + "tarsum", + version, + tsi.Algorithm, + } + } + + return append(prefix, suffix...), nil } diff --git a/storage/paths_test.go b/storage/paths_test.go index d2ff542f..33681f39 100644 --- a/storage/paths_test.go +++ b/storage/paths_test.go @@ -28,20 +28,25 @@ func TestPathMapper(t *testing.T) { name: "foo/bar", digest: digest.Digest("tarsum.v1+test:abcdef"), }, - expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/abcdef", - }, - { - spec: layerIndexLinkPathSpec{ - digest: digest.Digest("tarsum.v1+test:abcdef"), - }, - expected: "/pathmapper-test/layerindex/tarsum/v1/test/abcdef", + expected: "/pathmapper-test/repositories/foo/bar/layers/tarsum/v1/test/ab/abcdef", }, { spec: blobPathSpec{ - alg: "sha512", - digest: "abcdefabcdefabcdef908909909", + digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), }, - expected: "/pathmapper-test/blob/sha512/ab/abcdefabcdefabcdef908909909", + expected: "/pathmapper-test/blob/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909", + }, + { + spec: blobPathSpec{ + digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), + }, + expected: "/pathmapper-test/blob/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909", + }, + { + spec: blobPathSpec{ + digest: digest.Digest("tarsum+sha256:abcdefabcdefabcdef908909909"), + }, + expected: "/pathmapper-test/blob/tarsum/v0/sha256/ab/abcdefabcdefabcdef908909909", }, } { p, err := pm.path(testcase.spec) From c5bb224bf958f69b3830c75b1aa3f4b80925b113 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Tue, 25 Nov 2014 21:43:12 -0800 Subject: [PATCH 073/165] Fix read offset check for inmemory driver Signed-off-by: Ahmet Alp Balkan --- storagedriver/inmemory/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go index 0d28b2da..98b068e9 100644 --- a/storagedriver/inmemory/driver.go +++ b/storagedriver/inmemory/driver.go @@ -67,7 +67,7 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { contents, err := d.GetContent(path) if err != nil { return nil, err - } else if len(contents) < int(offset) { + } else if len(contents) <= int(offset) { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } From 6fead9073642f19095520ef508b5c48b1aaa7393 Mon Sep 17 00:00:00 2001 From: Stephen J Day 
Date: Wed, 26 Nov 2014 12:52:52 -0800 Subject: [PATCH 074/165] Rich error reporting for manifest push To provide rich error reporting during manifest pushes, the storage layer's verifyManifest stage has been modified to provide the necessary granularity. Along with this comes a partial shift to explicit error types, which represents a small step in a larger refactoring of error handling. Signature methods from libtrust have been added to the various Manifest types to clean up the verification code. A primitive deletion implementation for manifests has been added. It only deletes the manifest file and doesn't attempt to add some of the richer features requested, such as layer cleanup. --- storage/layer.go | 24 ++++++--- storage/layer_test.go | 8 +-- storage/layerstore.go | 7 +-- storage/layerupload.go | 2 +- storage/manifest.go | 110 +++++++++++++++++++++++++++++++++------ storage/manifest_test.go | 9 +++- storage/manifeststore.go | 61 ++++++++++++++-------- 7 files changed, 168 insertions(+), 53 deletions(-) diff --git a/storage/layer.go b/storage/layer.go index dc6b3422..2ad91314 100644 --- a/storage/layer.go +++ b/storage/layer.go @@ -53,9 +53,6 @@ type LayerUpload interface { } var ( - // ErrLayerUnknown returned when layer cannot be found. - ErrLayerUnknown = fmt.Errorf("unknown layer") - // ErrLayerExists returned when layer already exists ErrLayerExists = fmt.Errorf("layer exists") @@ -65,9 +62,6 @@ var ( // ErrLayerUploadUnknown returned when upload is not found. ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown") - // ErrLayerInvalidDigest returned when tarsum check fails. - ErrLayerInvalidDigest = fmt.Errorf("invalid layer digest") - // ErrLayerInvalidLength returned when length check fails. ErrLayerInvalidLength = fmt.Errorf("invalid layer length") @@ -75,3 +69,21 @@ var ( // Layer or LayerUpload. ErrLayerClosed = fmt.Errorf("layer closed") ) + +// ErrUnknownLayer returned when layer cannot be found. +type ErrUnknownLayer struct { + FSLayer FSLayer +} + +func (err ErrUnknownLayer) Error() string { + return fmt.Sprintf("unknown layer %v", err.FSLayer.BlobSum) +} + +// ErrLayerInvalidDigest returned when tarsum check fails.
+type ErrLayerInvalidDigest struct { + FSLayer FSLayer +} + +func (err ErrLayerInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v", err.FSLayer.BlobSum) +} diff --git a/storage/layer_test.go b/storage/layer_test.go index f04115e7..ba92d2de 100644 --- a/storage/layer_test.go +++ b/storage/layer_test.go @@ -169,11 +169,13 @@ func TestSimpleLayerRead(t *testing.T) { t.Fatalf("error expected fetching unknown layer") } - if err != ErrLayerUnknown { - t.Fatalf("unexpected error fetching non-existent layer: %v", err) - } else { + switch err.(type) { + case ErrUnknownLayer: err = nil + default: + t.Fatalf("unexpected error fetching non-existent layer: %v", err) } + randomLayerDigest, err := writeTestLayer(driver, ls.pathMapper, imageName, dgst, randomLayerReader) if err != nil { t.Fatalf("unexpected error writing test layer: %v", err) diff --git a/storage/layerstore.go b/storage/layerstore.go index 2544ec4f..d731a5b8 100644 --- a/storage/layerstore.go +++ b/storage/layerstore.go @@ -19,7 +19,8 @@ func (ls *layerStore) Exists(name string, digest digest.Digest) (bool, error) { _, err := ls.Fetch(name, digest) if err != nil { - if err == ErrLayerUnknown { + switch err.(type) { + case ErrUnknownLayer: return false, nil } @@ -34,7 +35,7 @@ func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError: - return nil, ErrLayerUnknown + return nil, ErrUnknownLayer{FSLayer{BlobSum: digest}} default: return nil, err } @@ -44,7 +45,7 @@ func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) { if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError: - return nil, ErrLayerUnknown + return nil, ErrUnknownLayer{FSLayer{BlobSum: digest}} default: return nil, err } diff --git a/storage/layerupload.go b/storage/layerupload.go index e0336a0f..de1a894b 100644 --- a/storage/layerupload.go +++ b/storage/layerupload.go @@ -275,7 +275,7 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d } if !digestVerifier.Verified() { - return "", ErrLayerInvalidDigest + return "", ErrLayerInvalidDigest{FSLayer{BlobSum: dgst}} } return dgst, nil diff --git a/storage/manifest.go b/storage/manifest.go index 9921fbea..8b288625 100644 --- a/storage/manifest.go +++ b/storage/manifest.go @@ -1,23 +1,48 @@ package storage import ( + "crypto/x509" "encoding/json" "fmt" + "strings" "github.com/docker/libtrust" "github.com/docker/docker-registry/digest" ) -var ( - // ErrManifestUnknown is returned if the manifest is not known by the - // registry. - ErrManifestUnknown = fmt.Errorf("unknown manifest") +// ErrUnknownManifest is returned if the manifest is not known by the +// registry. +type ErrUnknownManifest struct { + Name string + Tag string +} - // ErrManifestUnverified is returned when the registry is unable to verify - // the manifest. - ErrManifestUnverified = fmt.Errorf("unverified manifest") -) +func (err ErrUnknownManifest) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. +type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return fmt.Sprintf("unverified manifest") +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. 
Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} // Versioned provides a struct with just the manifest schemaVersion. Incoming // content with unknown schema version can be decoded against this struct to @@ -78,7 +103,37 @@ func (m *Manifest) Sign(pk libtrust.PrivateKey) (*SignedManifest, error) { }, nil } -// SignedManifest provides an envelope for +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. +func (m *Manifest) SignWithChain(key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.Marshal(m) + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + Raw: pretty, + }, nil +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. It contains fields to type SignedManifest struct { Manifest @@ -88,28 +143,51 @@ type SignedManifest struct { Raw []byte `json:"-"` } +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func (sm *SignedManifest) Verify() ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") + if err != nil { + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. +func (sm *SignedManifest) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} + // UnmarshalJSON populates a new ImageManifest struct from JSON data. -func (m *SignedManifest) UnmarshalJSON(b []byte) error { +func (sm *SignedManifest) UnmarshalJSON(b []byte) error { var manifest Manifest if err := json.Unmarshal(b, &manifest); err != nil { return err } - m.Manifest = manifest - m.Raw = b + sm.Manifest = manifest + sm.Raw = b return nil } // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner // contents. -func (m *SignedManifest) MarshalJSON() ([]byte, error) { - if len(m.Raw) > 0 { - return m.Raw, nil +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.Raw) > 0 { + return sm.Raw, nil } // If the raw data is not available, just dump the inner content. 
-	return json.Marshal(&m.Manifest)
+	return json.Marshal(&sm.Manifest)
 }
 
 // FSLayer is a container struct for BlobSums defined in an image manifest
diff --git a/storage/manifest_test.go b/storage/manifest_test.go
index c96c1dec..e4517943 100644
--- a/storage/manifest_test.go
+++ b/storage/manifest_test.go
@@ -33,8 +33,13 @@ func TestManifestStorage(t *testing.T) {
 		t.Fatalf("manifest should not exist")
 	}
 
-	if _, err := ms.Get(name, tag); err != ErrManifestUnknown {
-		t.Fatalf("expected manifest unknown error: %v != %v", err, ErrManifestUnknown)
+	if _, err := ms.Get(name, tag); true {
+		switch err.(type) {
+		case ErrUnknownManifest:
+			break
+		default:
+			t.Fatalf("expected manifest unknown error: %#v", err)
+		}
 	}
 
 	manifest := Manifest{
diff --git a/storage/manifeststore.go b/storage/manifeststore.go
index 1b76c8c0..707311b8 100644
--- a/storage/manifeststore.go
+++ b/storage/manifeststore.go
@@ -4,9 +4,8 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/docker/libtrust"
-
 	"github.com/docker/docker-registry/storagedriver"
+	"github.com/docker/libtrust"
 )
 
 type manifestStore struct {
@@ -45,7 +44,7 @@ func (ms *manifestStore) Get(name, tag string) (*SignedManifest, error) {
 	if err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
-			return nil, ErrManifestUnknown
+			return nil, ErrUnknownManifest{Name: name, Tag: tag}
 		default:
 			return nil, err
 		}
@@ -73,13 +72,28 @@ func (ms *manifestStore) Put(name, tag string, manifest *SignedManifest) error {
 		return err
 	}
 
-	// TODO(stevvooe): Should we get manifest first?
+	// TODO(stevvooe): Should we get old manifest first? Perhaps, write, then
+	// move to ensure a valid manifest?
 	return ms.driver.PutContent(p, manifest.Raw)
 }
 
 func (ms *manifestStore) Delete(name, tag string) error {
-	panic("not implemented")
+	p, err := ms.path(name, tag)
+	if err != nil {
+		return err
+	}
+
+	if err := ms.driver.Delete(p); err != nil {
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError, *storagedriver.PathNotFoundError:
+			return ErrUnknownManifest{Name: name, Tag: tag}
+		default:
+			return err
+		}
+	}
+
+	return nil
 }
 
 func (ms *manifestStore) path(name, tag string) (string, error) {
@@ -90,6 +104,12 @@ func (ms *manifestStore) path(name, tag string) (string, error) {
 }
 
 func (ms *manifestStore) verifyManifest(name, tag string, manifest *SignedManifest) error {
+	// TODO(stevvooe): This verification is present here, but this needs to be
+	// lifted out of the storage infrastructure and moved into a package
+	// oriented towards defining verifiers and reporting them with
+	// granularity.
+
+	var errs ErrManifestVerification
 	if manifest.Name != name {
 		return fmt.Errorf("name does not match manifest name")
 	}
@@ -98,37 +118,34 @@ func (ms *manifestStore) verifyManifest(name, tag string, manifest *SignedManife
 		return fmt.Errorf("tag does not match manifest tag")
 	}
 
-	var errs []error
+	// TODO(stevvooe): These pubkeys need to be checked with either Verify or
+	// VerifyWithChains. We need to define the exact source of the CA.
+	// Perhaps it's a configuration value injected into the manifest store.
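+	// Note that Verify below only proves the manifest was signed by some
+	// key; it does not, by itself, establish trust in that key.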
+
+	if _, err := manifest.Verify(); err != nil {
+		switch err {
+		case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey:
+			errs = append(errs, ErrManifestUnverified{})
+		default:
+			errs = append(errs, err)
+		}
+	}
 
 	for _, fsLayer := range manifest.FSLayers {
 		exists, err := ms.layerService.Exists(name, fsLayer.BlobSum)
 		if err != nil {
-			// TODO(stevvooe): Need to store information about missing blob.
 			errs = append(errs, err)
 		}
 
 		if !exists {
-			errs = append(errs, fmt.Errorf("missing layer %v", fsLayer.BlobSum))
+			errs = append(errs, ErrUnknownLayer{FSLayer: fsLayer})
 		}
 	}
 
 	if len(errs) != 0 {
 		// TODO(stevvooe): These need to be recoverable by a caller.
-		return fmt.Errorf("missing layers: %v", errs)
+		return errs
 	}
 
-	js, err := libtrust.ParsePrettySignature(manifest.Raw, "signatures")
-	if err != nil {
-		return err
-	}
-
-	_, err = js.Verify() // These pubkeys need to be checked.
-	if err != nil {
-		return err
-	}
-
-	// TODO(sday): Pubkey checks need to go here. This where things get fancy.
-	// Perhaps, an injected service would reduce coupling here.
-
 	return nil
 }

From 4a4d403655f6c030bdd38fad5fad0eef9b4fdb5e Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 26 Nov 2014 13:14:20 -0800
Subject: [PATCH 075/165] Correct ENV declaration in Dockerfile

---
 project/dev-image/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/project/dev-image/Dockerfile b/project/dev-image/Dockerfile
index 017c1a21..1e2a8471 100644
--- a/project/dev-image/Dockerfile
+++ b/project/dev-image/Dockerfile
@@ -6,7 +6,7 @@ ENV GOROOT /usr/local/go
 ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin
 
 ENV LANG C
-ENG LC_ALL C
+ENV LC_ALL C
 
 RUN apt-get update && apt-get install -y \
 	wget ca-certificates git mercurial bzr \

From e809796f5972ee3384afc622a73b09eda3cdf0a1 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 26 Nov 2014 12:16:58 -0800
Subject: [PATCH 076/165] Initial implementation of Manifest HTTP API

Push, pull and delete of manifest files in the registry have been implemented
on top of the storage services. Basic workflows, including reporting of
missing manifests, are tested, including various proposed response codes.
Common testing functionality has been collected into shared methods. A test
suite may be emerging but it might be better to capture more edge cases (such
as resumable upload, range requests, etc.) before we commit to a full
approach.

To support clearer test cases and simpler handler methods, an
application-aware urlBuilder has been added. We may want to export the
functionality for use in the client, which could allow us to abstract away
from gorilla/mux.

A few error codes have been added to fill in error conditions missing from
the proposal. Some use cases have identified some problems with the approach
to error reporting that require more work to reconcile. To resolve this, the
mapping of Go errors into error types needs to be pulled out of the handlers
and into the application. We also need to move to type-based errors, with
rich information, rather than value-based errors. ErrorHandlers will probably
replace the http.Handlers to make this work correctly.

Unrelated to the above, the "length" parameter has been migrated to "size"
for completing layer uploads. This change should have gone out before but
these diffs ended up being coupled with the parameter name change due to
updates to the layer unit tests.
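As a rough sketch (hypothetical client code, not part of this diff; the names
uploadURLBase, dgst, contentLength and content stand in for values a real
client would hold), completing an upload monolithically now looks like:

    // Complete the upload with a single PUT; "size" replaces the old
    // "length" query key.
    u := appendValues(uploadURLBase, url.Values{
        "digest": []string{dgst.String()},
        "size":   []string{fmt.Sprint(contentLength)},
    })
    req, _ := http.NewRequest("PUT", u, content)
    resp, _ := http.DefaultClient.Do(req)
    // A 201 Created with a Location header for the layer is expected.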
--- api_test.go | 491 +++++++++++++++++++++++++++++++++---------------- app.go | 5 +- context.go | 2 + errors.go | 29 ++- errors_test.go | 2 +- helpers.go | 9 - images.go | 67 +++++++ layer.go | 26 +-- layerupload.go | 19 +- urls.go | 141 ++++++++++++++ 10 files changed, 581 insertions(+), 210 deletions(-) create mode 100644 urls.go diff --git a/api_test.go b/api_test.go index c850f141..cc27e5b0 100644 --- a/api_test.go +++ b/api_test.go @@ -1,6 +1,8 @@ package registry import ( + "bytes" + "encoding/json" "fmt" "io" "net/http" @@ -10,7 +12,9 @@ import ( "os" "testing" - "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/docker-registry/storage" _ "github.com/docker/docker-registry/storagedriver/inmemory" "github.com/gorilla/handlers" @@ -34,11 +38,10 @@ func TestLayerAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - router := v2APIRouter() + builder, err := newURLBuilderFromString(server.URL) - u, err := url.Parse(server.URL) if err != nil { - t.Fatalf("error parsing server url: %v", err) + t.Fatalf("error creating url builder: %v", err) } imageName := "foo/bar" @@ -52,154 +55,65 @@ func TestLayerAPI(t *testing.T) { // ----------------------------------- // Test fetch for non-existent content - r, err := router.GetRoute(routeNameBlob).Host(u.Host). - URL("name", imageName, - "digest", tarSumStr) + layerURL, err := builder.buildLayerURL(imageName, layerDigest) + if err != nil { + t.Fatalf("error building url: %v", err) + } - resp, err := http.Get(r.String()) + resp, err := http.Get(layerURL) if err != nil { t.Fatalf("unexpected error fetching non-existent layer: %v", err) } - switch resp.StatusCode { - case http.StatusNotFound: - break // expected - default: - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status fetching non-existent layer: %v, %v", resp.StatusCode, resp.Status) - } - - t.Logf("response:\n%s", string(d)) - t.Fatalf("unexpected status fetching non-existent layer: %v, %v", resp.StatusCode, resp.Status) - } + checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) // ------------------------------------------ // Test head request for non-existent content - resp, err = http.Head(r.String()) - if err != nil { - t.Fatalf("unexpected error checking head on non-existent layer: %v", err) - } - - switch resp.StatusCode { - case http.StatusNotFound: - break // expected - default: - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status checking head on non-existent layer: %v, %v", resp.StatusCode, resp.Status) - } - - t.Logf("response:\n%s", string(d)) - t.Fatalf("unexpected status checking head on non-existent layer: %v, %v", resp.StatusCode, resp.Status) - } - - // ------------------------------------------ - // Upload a layer - r, err = router.GetRoute(routeNameBlobUpload).Host(u.Host). 
- URL("name", imageName) - if err != nil { - t.Fatalf("error starting layer upload: %v", err) - } - - resp, err = http.Post(r.String(), "", nil) - if err != nil { - t.Fatalf("error starting layer upload: %v", err) - } - - if resp.StatusCode != http.StatusAccepted { - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status starting layer upload: %v, %v", resp.StatusCode, resp.Status) - } - - t.Logf("response:\n%s", string(d)) - t.Fatalf("unexpected status starting layer upload: %v, %v", resp.StatusCode, resp.Status) - } - - if resp.Header.Get("Location") == "" { // TODO(stevvooe): Need better check here. - t.Fatalf("unexpected Location: %q != %q", resp.Header.Get("Location"), "foo") - } - - if resp.Header.Get("Content-Length") != "0" { - t.Fatalf("unexpected content-length: %q != %q", resp.Header.Get("Content-Length"), "0") - } - - layerLength, _ := layerFile.Seek(0, os.SEEK_END) - layerFile.Seek(0, os.SEEK_SET) - - uploadURLStr := resp.Header.Get("Location") - - // TODO(sday): Cancel the layer upload here and restart. - - query := url.Values{ - "digest": []string{layerDigest.String()}, - "length": []string{fmt.Sprint(layerLength)}, - } - - uploadURL, err := url.Parse(uploadURLStr) - if err != nil { - t.Fatalf("unexpected error parsing url: %v", err) - } - - uploadURL.RawQuery = query.Encode() - - // Just do a monolithic upload - req, err := http.NewRequest("PUT", uploadURL.String(), layerFile) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error doing put: %v", err) - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - break // expected - default: - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status putting chunk: %v, %v", resp.StatusCode, resp.Status) - } - - t.Logf("response:\n%s", string(d)) - t.Fatalf("unexpected status putting chunk: %v, %v", resp.StatusCode, resp.Status) - } - - if resp.Header.Get("Location") == "" { - t.Fatalf("unexpected Location: %q", resp.Header.Get("Location")) - } - - if resp.Header.Get("Content-Length") != "0" { - t.Fatalf("unexpected content-length: %q != %q", resp.Header.Get("Content-Length"), "0") - } - - layerURL := resp.Header.Get("Location") - - // ------------------------ - // Use a head request to see if the layer exists. 
resp, err = http.Head(layerURL) if err != nil { t.Fatalf("unexpected error checking head on non-existent layer: %v", err) } - switch resp.StatusCode { - case http.StatusOK: - break // expected - default: - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status checking head on layer: %v, %v", resp.StatusCode, resp.Status) - } + checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) - t.Logf("response:\n%s", string(d)) - t.Fatalf("unexpected status checking head on layer: %v, %v", resp.StatusCode, resp.Status) + // ------------------------------------------ + // Upload a layer + layerUploadURL, err := builder.buildLayerUploadURL(imageName) + if err != nil { + t.Fatalf("error building upload url: %v", err) } - logrus.Infof("fetch the layer") + resp, err = http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("error starting layer upload: %v", err) + } + + checkResponse(t, "starting layer upload", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + }) + + layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerFile.Seek(0, os.SEEK_SET) + + // TODO(sday): Cancel the layer upload here and restart. + + uploadURLBase := startPushLayer(t, builder, imageName) + pushLayer(t, builder, imageName, layerDigest, uploadURLBase, layerFile) + + // ------------------------ + // Use a head request to see if the layer exists. + resp, err = http.Head(layerURL) + if err != nil { + t.Fatalf("unexpected error checking head on existing layer: %v", err) + } + + checkResponse(t, "checking head on existing layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + }) + // ---------------- // Fetch the layer! 
resp, err = http.Get(layerURL) @@ -207,30 +121,299 @@ func TestLayerAPI(t *testing.T) { t.Fatalf("unexpected error fetching layer: %v", err) } - switch resp.StatusCode { - case http.StatusOK: - break // expected - default: - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status fetching layer: %v, %v", resp.StatusCode, resp.Status) - } - - t.Logf("response:\n%s", string(d)) - t.Fatalf("unexpected status fetching layer: %v, %v", resp.StatusCode, resp.Status) - } + checkResponse(t, "fetching layer", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{fmt.Sprint(layerLength)}, + }) // Verify the body verifier := digest.NewDigestVerifier(layerDigest) io.Copy(verifier, resp.Body) if !verifier.Verified() { - d, err := httputil.DumpResponse(resp, true) - if err != nil { - t.Fatalf("unexpected status checking head on layer ayo!: %v, %v", resp.StatusCode, resp.Status) - } - - t.Logf("response:\n%s", string(d)) t.Fatalf("response body did not pass verification") } } + +func TestManifestAPI(t *testing.T) { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + + app := NewApp(config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := newURLBuilderFromString(server.URL) + if err != nil { + t.Fatalf("unexpected error creating url builder: %v", err) + } + + imageName := "foo/bar" + tag := "thetag" + + manifestURL, err := builder.buildManifestURL(imageName, tag) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + + // TODO(stevvooe): Shoot. The error setup is not working out. The content- + // type headers are being set after writing the status code. 
+ // if resp.Header.Get("Content-Type") != "application/json" { + // t.Fatalf("unexpected content type: %v != 'application/json'", + // resp.Header.Get("Content-Type")) + // } + dec := json.NewDecoder(resp.Body) + + var respErrs struct { + Errors []Error + } + if err := dec.Decode(&respErrs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if len(respErrs.Errors) == 0 { + t.Fatalf("expected errors in response") + } + + if respErrs.Errors[0].Code != ErrorCodeUnknownManifest { + t.Fatalf("expected manifest unknown error: got %v", respErrs) + } + + // -------------------------------- + // Attempt to push unsigned manifest with missing layers + unsignedManifest := &storage.Manifest{ + Name: imageName, + Tag: tag, + FSLayers: []storage.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + }, + } + + resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + defer resp.Body.Close() + checkResponse(t, "posting unsigned manifest", resp, http.StatusBadRequest) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&respErrs); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + var unverified int + var missingLayers int + var invalidDigests int + + for _, err := range respErrs.Errors { + switch err.Code { + case ErrorCodeUnverifiedManifest: + unverified++ + case ErrorCodeUnknownLayer: + missingLayers++ + case ErrorCodeInvalidDigest: + // TODO(stevvooe): This error isn't quite descriptive enough -- + // the layer with an invalid digest isn't identified. + invalidDigests++ + default: + t.Fatalf("unexpected error: %v", err) + } + } + + if unverified != 1 { + t.Fatalf("should have received one unverified manifest error: %v", respErrs) + } + + if missingLayers != 2 { + t.Fatalf("should have received two missing layer errors: %v", respErrs) + } + + if invalidDigests != 2 { + t.Fatalf("should have received two invalid digest errors: %v", respErrs) + } + + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range unsignedManifest.FSLayers { + rs, dgstStr, err := testutil.CreateRandomTarFile() + + if err != nil { + t.Fatalf("error creating random layer %d: %v", i, err) + } + dgst := digest.Digest(dgstStr) + + expectedLayers[dgst] = rs + unsignedManifest.FSLayers[i].BlobSum = dgst + + uploadURLBase := startPushLayer(t, builder, imageName) + pushLayer(t, builder, imageName, dgst, uploadURLBase, rs) + } + + // ------------------- + // Push the signed manifest with all layers pushed. 
+ signedManifest, err := unsignedManifest.Sign(pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + + resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) + + checkResponse(t, "putting manifest", resp, http.StatusOK) + + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + + var fetchedManifest storage.SignedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { + t.Fatalf("manifests do not match") + } +} + +func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { + body, err := json.Marshal(v) + if err != nil { + t.Fatalf("unexpected error marshaling %v: %v", v, err) + } + + req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) + if err != nil { + t.Fatalf("error creating request for %s: %v", msg, err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error doing put request while %s: %v", msg, err) + } + + return resp +} + +func startPushLayer(t *testing.T, ub *urlBuilder, name string) string { + layerUploadURL, err := ub.buildLayerUploadURL(name) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name), resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Location": []string{"*"}, + "Content-Length": []string{"0"}, + }) + + return resp.Header.Get("Location") +} + +// pushLayer pushes the layer content returning the url on success. 
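+// The upload is completed with a single monolithic PUT to the upload URL,
+// passing the digest and size as query parameters.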
+func pushLayer(t *testing.T, ub *urlBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string {
+	rsLength, _ := rs.Seek(0, os.SEEK_END)
+	rs.Seek(0, os.SEEK_SET)
+
+	uploadURL := appendValues(uploadURLBase, url.Values{
+		"digest": []string{dgst.String()},
+		"size":   []string{fmt.Sprint(rsLength)},
+	})
+
+	// Just do a monolithic upload
+	req, err := http.NewRequest("PUT", uploadURL, rs)
+	if err != nil {
+		t.Fatalf("unexpected error creating new request: %v", err)
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("unexpected error doing put: %v", err)
+	}
+	defer resp.Body.Close()
+
+	checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)
+
+	expectedLayerURL, err := ub.buildLayerURL(name, dgst)
+	if err != nil {
+		t.Fatalf("error building expected layer url: %v", err)
+	}
+
+	checkHeaders(t, resp, http.Header{
+		"Location":       []string{expectedLayerURL},
+		"Content-Length": []string{"0"},
+	})
+
+	return resp.Header.Get("Location")
+}
+
+func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) {
+	if resp.StatusCode != expectedStatus {
+		t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus)
+		maybeDumpResponse(t, resp)
+
+		t.FailNow()
+	}
+}
+
+func maybeDumpResponse(t *testing.T, resp *http.Response) {
+	if d, err := httputil.DumpResponse(resp, true); err != nil {
+		t.Logf("error dumping response: %v", err)
+	} else {
+		t.Logf("response:\n%s", string(d))
+	}
+}
+
+// checkHeaders checks that the response has at least the headers. If not,
+// the test will fail. If a passed-in header value is "*", any non-zero value
+// will suffice as a match.
+func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) {
+	for k, vs := range headers {
+		if resp.Header.Get(k) == "" {
+			t.Fatalf("response missing header %q", k)
+		}
+
+		for _, v := range vs {
+			if v == "*" {
+				// Just ensure there is some value.
+				if len(resp.Header[k]) > 0 {
+					continue
+				}
+			}
+
+			for _, hv := range resp.Header[k] {
+				if hv != v {
+					t.Fatalf("header value not matched in response: %q != %q", hv, v)
+				}
+			}
+		}
+	}
+}
diff --git a/app.go b/app.go
index 25bf572d..324cad29 100644
--- a/app.go
+++ b/app.go
@@ -108,8 +108,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		vars := mux.Vars(r)
 		context := &Context{
-			App:  app,
-			Name: vars["name"],
+			App:        app,
+			Name:       vars["name"],
+			urlBuilder: newURLBuilderFromRequest(r),
 		}
 
 		// Store vars for underlying handlers.
diff --git a/context.go b/context.go
index c246d6ac..8d894e0f 100644
--- a/context.go
+++ b/context.go
@@ -24,4 +24,6 @@ type Context struct {
 
 	// log provides a context specific logger.
 	log *logrus.Entry
+
+	urlBuilder *urlBuilder
 }
diff --git a/errors.go b/errors.go
index 113097dd..9593741d 100644
--- a/errors.go
+++ b/errors.go
@@ -34,6 +34,14 @@ const (
 	// match the provided tag.
 	ErrorCodeInvalidTag
 
+	// ErrorCodeUnknownManifest returned when image manifest name and tag is
+	// unknown, accompanied by a 404 status.
+	ErrorCodeUnknownManifest
+
+	// ErrorCodeInvalidManifest returned when an image manifest is invalid,
+	// typically during a PUT operation.
+	ErrorCodeInvalidManifest
+
 	// ErrorCodeUnverifiedManifest is returned when the manifest fails signature
 	// validation.
ErrorCodeUnverifiedManifest @@ -56,6 +64,8 @@ var errorCodeStrings = map[ErrorCode]string{ ErrorCodeInvalidLength: "INVALID_LENGTH", ErrorCodeInvalidName: "INVALID_NAME", ErrorCodeInvalidTag: "INVALID_TAG", + ErrorCodeUnknownManifest: "UNKNOWN_MANIFEST", + ErrorCodeInvalidManifest: "INVALID_MANIFEST", ErrorCodeUnverifiedManifest: "UNVERIFIED_MANIFEST", ErrorCodeUnknownLayer: "UNKNOWN_LAYER", ErrorCodeUnknownLayerUpload: "UNKNOWN_LAYER_UPLOAD", @@ -66,12 +76,14 @@ var errorCodesMessages = map[ErrorCode]string{ ErrorCodeUnknown: "unknown error", ErrorCodeInvalidDigest: "provided digest did not match uploaded content", ErrorCodeInvalidLength: "provided length did not match content length", - ErrorCodeInvalidName: "Manifest name did not match URI", - ErrorCodeInvalidTag: "Manifest tag did not match URI", - ErrorCodeUnverifiedManifest: "Manifest failed signature validation", - ErrorCodeUnknownLayer: "Referenced layer not available", + ErrorCodeInvalidName: "manifest name did not match URI", + ErrorCodeInvalidTag: "manifest tag did not match URI", + ErrorCodeUnknownManifest: "manifest not known", + ErrorCodeInvalidManifest: "manifest is invalid", + ErrorCodeUnverifiedManifest: "manifest failed signature validation", + ErrorCodeUnknownLayer: "referenced layer not available", ErrorCodeUnknownLayerUpload: "cannot resume unknown layer upload", - ErrorCodeUntrustedSignature: "Manifest signed by untrusted source", + ErrorCodeUntrustedSignature: "manifest signed by untrusted source", } var stringToErrorCode map[string]ErrorCode @@ -178,7 +190,12 @@ func (errs *Errors) Push(code ErrorCode, details ...interface{}) { // PushErr pushes an error interface onto the error stack. func (errs *Errors) PushErr(err error) { - errs.Errors = append(errs.Errors, err) + switch err.(type) { + case Error: + errs.Errors = append(errs.Errors, err) + default: + errs.Errors = append(errs.Errors, Error{Message: err.Error()}) + } } func (errs *Errors) Error() string { diff --git a/errors_test.go b/errors_test.go index 709b6ced..e0392eb6 100644 --- a/errors_test.go +++ b/errors_test.go @@ -69,7 +69,7 @@ func TestErrorsManagement(t *testing.T) { t.Fatalf("error marashaling errors: %v", err) } - expectedJSON := "{\"errors\":[{\"code\":\"INVALID_DIGEST\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"UNKNOWN_LAYER\",\"message\":\"Referenced layer not available\",\"detail\":{\"unknown\":{\"blobSum\":\"sometestblobsumdoesntmatter\"}}}]}" + expectedJSON := "{\"errors\":[{\"code\":\"INVALID_DIGEST\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"UNKNOWN_LAYER\",\"message\":\"referenced layer not available\",\"detail\":{\"unknown\":{\"blobSum\":\"sometestblobsumdoesntmatter\"}}}]}" if string(p) != expectedJSON { t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) diff --git a/helpers.go b/helpers.go index 7714d029..6fce84a2 100644 --- a/helpers.go +++ b/helpers.go @@ -4,8 +4,6 @@ import ( "encoding/json" "io" "net/http" - - "github.com/gorilla/mux" ) // serveJSON marshals v and sets the content-type header to @@ -32,10 +30,3 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { handler.ServeHTTP(w, r) }) } - -// clondedRoute returns a clone of the named route from the router. 
-func clonedRoute(router *mux.Router, name string) *mux.Route {
-	route := new(mux.Route)
-	*route = *router.GetRoute(name) // clone the route
-	return route
-}
diff --git a/images.go b/images.go
index f16a3560..495e193a 100644
--- a/images.go
+++ b/images.go
@@ -1,8 +1,13 @@
 package registry
 
 import (
+	"encoding/json"
+	"fmt"
 	"net/http"
 
+	"github.com/docker/docker-registry/digest"
+
+	"github.com/docker/docker-registry/storage"
 	"github.com/gorilla/handlers"
 )
 
@@ -32,15 +37,77 @@ type imageManifestHandler struct {
 
 // GetImageManifest fetches the image manifest from the storage backend, if it exists.
 func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) {
+	manifests := imh.services.Manifests()
+	manifest, err := manifests.Get(imh.Name, imh.Tag)
+	if err != nil {
+		imh.Errors.Push(ErrorCodeUnknownManifest, err)
+		w.WriteHeader(http.StatusNotFound)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Header().Set("Content-Length", fmt.Sprint(len(manifest.Raw)))
+	w.Write(manifest.Raw)
 }
 
 // PutImageManifest validates and stores an image in the registry.
 func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) {
+	manifests := imh.services.Manifests()
+	dec := json.NewDecoder(r.Body)
+
+	var manifest storage.SignedManifest
+	if err := dec.Decode(&manifest); err != nil {
+		imh.Errors.Push(ErrorCodeInvalidManifest, err)
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	if err := manifests.Put(imh.Name, imh.Tag, &manifest); err != nil {
+		// TODO(stevvooe): These error handling switches really need to be
+		// handled by an app global mapper.
+		switch err := err.(type) {
+		case storage.ErrManifestVerification:
+			for _, verificationError := range err {
+				switch verificationError := verificationError.(type) {
+				case storage.ErrUnknownLayer:
+					imh.Errors.Push(ErrorCodeUnknownLayer, verificationError.FSLayer)
+				case storage.ErrManifestUnverified:
+					imh.Errors.Push(ErrorCodeUnverifiedManifest)
+				default:
+					if verificationError == digest.ErrDigestInvalidFormat {
+						// TODO(stevvooe): We really need to move all errors
+						// to types. It's much more straightforward.
+						imh.Errors.Push(ErrorCodeInvalidDigest)
+					} else {
+						imh.Errors.PushErr(verificationError)
+					}
+				}
+			}
+		default:
+			imh.Errors.PushErr(err)
+		}
+
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
 }
 
 // DeleteImageManifest removes the image with the given tag from the registry.
 func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) {
+	manifests := imh.services.Manifests()
+	if err := manifests.Delete(imh.Name, imh.Tag); err != nil {
+		switch err := err.(type) {
+		case storage.ErrUnknownManifest:
+			imh.Errors.Push(ErrorCodeUnknownManifest, err)
+			w.WriteHeader(http.StatusNotFound)
+		default:
+			imh.Errors.Push(ErrorCodeUnknown, err)
+			w.WriteHeader(http.StatusBadRequest)
+		}
+		return
+	}
+
+	w.Header().Set("Content-Length", "0")
+	w.WriteHeader(http.StatusAccepted)
 }
diff --git a/layer.go b/layer.go
index 5e1c6f45..4d937c64 100644
--- a/layer.go
+++ b/layer.go
@@ -6,7 +6,6 @@ import (
 	"github.com/docker/docker-registry/digest"
 	"github.com/docker/docker-registry/storage"
 	"github.com/gorilla/handlers"
-	"github.com/gorilla/mux"
 )
 
 // layerDispatcher uses the request context to build a layerHandler.
@@ -47,33 +46,16 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { layer, err := layers.Fetch(lh.Name, lh.Digest) if err != nil { - switch err { - case storage.ErrLayerUnknown: + switch err := err.(type) { + case storage.ErrUnknownLayer: w.WriteHeader(http.StatusNotFound) - lh.Errors.Push(ErrorCodeUnknownLayer, - map[string]interface{}{ - "unknown": storage.FSLayer{BlobSum: lh.Digest}, - }) - return + lh.Errors.Push(ErrorCodeUnknownLayer, err.FSLayer) default: lh.Errors.Push(ErrorCodeUnknown, err) - return } + return } defer layer.Close() http.ServeContent(w, r, layer.Digest().String(), layer.CreatedAt(), layer) } - -func buildLayerURL(router *mux.Router, r *http.Request, layer storage.Layer) (string, error) { - route := clonedRoute(router, routeNameBlob) - - layerURL, err := route.Schemes(r.URL.Scheme).Host(r.Host). - URL("name", layer.Name(), - "digest", layer.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} diff --git a/layerupload.go b/layerupload.go index d1ec4206..d7aaa24f 100644 --- a/layerupload.go +++ b/layerupload.go @@ -10,7 +10,6 @@ import ( "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" - "github.com/gorilla/mux" ) // layerUploadDispatcher constructs and returns the layer upload handler for @@ -151,7 +150,7 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http. // chunk responses. This sets the correct headers but the response status is // left to the caller. func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error { - uploadURL, err := buildLayerUploadURL(luh.router, r, luh.Upload) + uploadURL, err := luh.urlBuilder.forLayerUpload(luh.Upload) if err != nil { logrus.Infof("error building upload url: %s", err) return err @@ -171,7 +170,7 @@ var errNotReadyToComplete = fmt.Errorf("not ready to complete upload") func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *http.Request) error { // If we get a digest and length, we can finish the upload. dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! - sizeStr := r.FormValue("length") + sizeStr := r.FormValue("size") if dgstStr == "" || sizeStr == "" { return errNotReadyToComplete @@ -200,7 +199,7 @@ func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Req return } - layerURL, err := buildLayerURL(luh.router, r, layer) + layerURL, err := luh.urlBuilder.forLayer(layer) if err != nil { luh.Errors.Push(ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) @@ -211,15 +210,3 @@ func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Req w.Header().Set("Content-Length", "0") w.WriteHeader(http.StatusCreated) } - -func buildLayerUploadURL(router *mux.Router, r *http.Request, upload storage.LayerUpload) (string, error) { - route := clonedRoute(router, routeNameBlobUploadResume) - - uploadURL, err := route.Schemes(r.URL.Scheme).Host(r.Host). 
-		URL("name", upload.Name(), "uuid", upload.UUID())
-	if err != nil {
-		return "", err
-	}
-
-	return uploadURL.String(), nil
-}
diff --git a/urls.go b/urls.go
new file mode 100644
index 00000000..d9e77f5e
--- /dev/null
+++ b/urls.go
@@ -0,0 +1,141 @@
+package registry
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/docker/docker-registry/digest"
+	"github.com/docker/docker-registry/storage"
+	"github.com/gorilla/mux"
+)
+
+type urlBuilder struct {
+	url    *url.URL // url root (ie http://localhost/)
+	router *mux.Router
+}
+
+func newURLBuilder(root *url.URL) *urlBuilder {
+	return &urlBuilder{
+		url:    root,
+		router: v2APIRouter(),
+	}
+}
+
+func newURLBuilderFromRequest(r *http.Request) *urlBuilder {
+	u := &url.URL{
+		Scheme: r.URL.Scheme,
+		Host:   r.Host,
+	}
+
+	return newURLBuilder(u)
+}
+
+func newURLBuilderFromString(root string) (*urlBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return newURLBuilder(u), nil
+}
+
+func (ub *urlBuilder) forManifest(m *storage.Manifest) (string, error) {
+	return ub.buildManifestURL(m.Name, m.Tag)
+}
+
+func (ub *urlBuilder) buildManifestURL(name, tag string) (string, error) {
+	route := clonedRoute(ub.router, routeNameImageManifest)
+
+	manifestURL, err := route.
+		Schemes(ub.url.Scheme).
+		Host(ub.url.Host).
+		URL("name", name, "tag", tag)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+func (ub *urlBuilder) forLayer(l storage.Layer) (string, error) {
+	return ub.buildLayerURL(l.Name(), l.Digest())
+}
+
+func (ub *urlBuilder) buildLayerURL(name string, dgst digest.Digest) (string, error) {
+	route := clonedRoute(ub.router, routeNameBlob)
+
+	layerURL, err := route.
+		Schemes(ub.url.Scheme).
+		Host(ub.url.Host).
+		URL("name", name, "digest", dgst.String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+func (ub *urlBuilder) buildLayerUploadURL(name string) (string, error) {
+	route := clonedRoute(ub.router, routeNameBlobUpload)
+
+	uploadURL, err := route.
+		Schemes(ub.url.Scheme).
+		Host(ub.url.Host).
+		URL("name", name)
+	if err != nil {
+		return "", err
+	}
+
+	return uploadURL.String(), nil
+}
+
+func (ub *urlBuilder) forLayerUpload(layerUpload storage.LayerUpload) (string, error) {
+	return ub.buildLayerUploadResumeURL(layerUpload.Name(), layerUpload.UUID())
+}
+
+func (ub *urlBuilder) buildLayerUploadResumeURL(name, uuid string, values ...url.Values) (string, error) {
+	route := clonedRoute(ub.router, routeNameBlobUploadResume)
+
+	uploadURL, err := route.
+		Schemes(ub.url.Scheme).
+		Host(ub.url.Host).
+		URL("name", name, "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+	up, err := url.Parse(u)
+
+	if err != nil {
+		panic(err) // should never happen
+	}
+
+	return appendValuesURL(up, values...).String()
+}
+
+// clonedRoute returns a clone of the named route from the router.
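+// Cloning allows callers to set Schemes and Host on the copy without
+// mutating the route registered on the shared router.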
+func clonedRoute(router *mux.Router, name string) *mux.Route {
+	route := new(mux.Route)
+	*route = *router.GetRoute(name) // clone the route
+	return route
+}

From c70665750231151e8e0dc4a854fb7d5421bb4855 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 26 Nov 2014 15:33:33 -0800
Subject: [PATCH 077/165] Address race condition in client tests (closes #784)

---
 client/objectstore.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/client/objectstore.go b/client/objectstore.go
index 814cdbab..06fba3d8 100644
--- a/client/objectstore.go
+++ b/client/objectstore.go
@@ -211,6 +211,13 @@ func (mlw *memoryLayerWriter) Write(p []byte) (int, error) {
 }
 
 func (mlw *memoryLayerWriter) Close() error {
+	mlw.ml.cond.L.Lock()
+	defer mlw.ml.cond.L.Unlock()
+
+	return mlw.close()
+}
+
+func (mlw *memoryLayerWriter) close() error {
 	mlw.ml.writing = false
 	mlw.ml.cond.Broadcast()
 	return nil

From 98f5f30e75338dcb4cd1c789d864469962da7aa3 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 1 Dec 2014 15:57:05 -0800
Subject: [PATCH 078/165] Create copy of buffer for SignedManifest.Raw

Without this copy, the buffer may be re-used in the json package, causing
missing or corrupted content for the long-lived SignedManifest object. By
creating a new buffer, owned by the SignedManifest object, the content
remains stable.

---
 storage/manifest.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/storage/manifest.go b/storage/manifest.go
index 8b288625..895a112e 100644
--- a/storage/manifest.go
+++ b/storage/manifest.go
@@ -174,7 +174,8 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
 	}
 
 	sm.Manifest = manifest
-	sm.Raw = b
+	sm.Raw = make([]byte, len(b), len(b))
+	copy(sm.Raw, b)
 
 	return nil
 }

From b73a6c19980a2bdbcfcbac8d8af957dd7be6c764 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 1 Dec 2014 16:11:27 -0800
Subject: [PATCH 079/165] Use json.MarshalIndent for raw manifest json

This provides compatibility with what is in docker core, ensuring that image
manifests generated here have the same formatting. We'll need to automate
this somehow.

---
 storage/manifest.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/storage/manifest.go b/storage/manifest.go
index 895a112e..000bc617 100644
--- a/storage/manifest.go
+++ b/storage/manifest.go
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/Sirupsen/logrus"
+
 	"github.com/docker/libtrust"
 
 	"github.com/docker/docker-registry/digest"
@@ -78,7 +80,7 @@ type Manifest struct {
 // SignedManifest. This typically won't be used within the registry, except
 // for testing.
 func (m *Manifest) Sign(pk libtrust.PrivateKey) (*SignedManifest, error) {
-	p, err := json.Marshal(m)
+	p, err := json.MarshalIndent(m, "", " ")
 	if err != nil {
 		return nil, err
 	}
@@ -107,7 +109,7 @@ func (m *Manifest) Sign(pk libtrust.PrivateKey) (*SignedManifest, error) {
 // The public key of the first element in the chain must be the public key
 // corresponding with the sign key.
func (m *Manifest) SignWithChain(key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { - p, err := json.Marshal(m) + p, err := json.MarshalIndent(m, "", " ") if err != nil { return nil, err } @@ -148,6 +150,7 @@ type SignedManifest struct { func (sm *SignedManifest) Verify() ([]libtrust.PublicKey, error) { js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") return nil, err } From 8c7bec72b1e4b3f724e862b538e3632e1fad0af9 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 1 Dec 2014 16:13:01 -0800 Subject: [PATCH 080/165] Cleanup image verification error handling This diff removes a few early outs that caused errors to be unreported and catches a missed error case for signature verification from libtrust. More work needs to be done around ensuring consistent error handling but this is enough to make the API work correctly. --- storage/manifeststore.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/storage/manifeststore.go b/storage/manifeststore.go index 707311b8..e1760dd8 100644 --- a/storage/manifeststore.go +++ b/storage/manifeststore.go @@ -111,11 +111,13 @@ func (ms *manifestStore) verifyManifest(name, tag string, manifest *SignedManife var errs ErrManifestVerification if manifest.Name != name { - return fmt.Errorf("name does not match manifest name") + // TODO(stevvooe): This needs to be an exported error + errs = append(errs, fmt.Errorf("name does not match manifest name")) } if manifest.Tag != tag { - return fmt.Errorf("tag does not match manifest tag") + // TODO(stevvooe): This needs to be an exported error. + errs = append(errs, fmt.Errorf("tag does not match manifest tag")) } // TODO(stevvooe): These pubkeys need to be checked with either Verify or @@ -127,7 +129,11 @@ func (ms *manifestStore) verifyManifest(name, tag string, manifest *SignedManife case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: errs = append(errs, ErrManifestUnverified{}) default: - errs = append(errs, err) + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } } } From e6e021906584398848ac73025e0d456577f98437 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 1 Dec 2014 17:10:33 -0800 Subject: [PATCH 081/165] Avoid manifest verification errors by using Raw Because json.Marshal does compaction on returned results, applications must directly use SignedManifest.Raw when the marshaled value is required. Otherwise, the returned manifest will fail signature checks. 
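As a sketch of the consequence for callers (sm here is assumed to be a
*storage.SignedManifest), a handler should write the raw payload rather than
re-encoding it:

    // Serve the manifest using the raw, signed bytes; passing sm through
    // json.Marshal would compact the JSON and break the signature.
    w.Header().Set("Content-Type", "application/json")
    w.Write(sm.Raw)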
---
 api_test.go         | 14 ++++++++++----
 storage/manifest.go |  9 ++++++---
 2 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/api_test.go b/api_test.go
index cc27e5b0..cce4a251 100644
--- a/api_test.go
+++ b/api_test.go
@@ -277,7 +277,7 @@ func TestManifestAPI(t *testing.T) {
 
 	resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest)
 
-	checkResponse(t, "putting manifest", resp, http.StatusOK)
+	checkResponse(t, "putting signed manifest", resp, http.StatusOK)
 
 	resp, err = http.Get(manifestURL)
 	if err != nil {
@@ -299,9 +299,15 @@ func TestManifestAPI(t *testing.T) {
 }
 
 func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response {
-	body, err := json.Marshal(v)
-	if err != nil {
-		t.Fatalf("unexpected error marshaling %v: %v", v, err)
+	var body []byte
+	if sm, ok := v.(*storage.SignedManifest); ok {
+		body = sm.Raw
+	} else {
+		var err error
+		body, err = json.MarshalIndent(v, "", " ")
+		if err != nil {
+			t.Fatalf("unexpected error marshaling %v: %v", v, err)
+		}
 	}
 
 	req, err := http.NewRequest("PUT", url, bytes.NewReader(body))
diff --git a/storage/manifest.go b/storage/manifest.go
index 000bc617..daeaa39b 100644
--- a/storage/manifest.go
+++ b/storage/manifest.go
@@ -140,8 +140,9 @@ type SignedManifest struct {
 	Manifest
 
 	// Raw is the byte representation of the ImageManifest, used for signature
-	// verification. The manifest byte representation cannot change or it will
-	// have to be re-signed.
+	// verification. The value of Raw must be used directly during
+	// serialization, or the signature check will fail. The manifest byte
+	// representation cannot change or it will have to be re-signed.
 	Raw []byte `json:"-"`
 }
 
@@ -184,7 +185,9 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
 }
 
 // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
-// contents.
+// contents. Applications requiring a marshaled signed manifest should simply
+// use Raw directly, since the content produced by json.Marshal will be
+// compacted and will fail signature checks.
 func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
 	if len(sm.Raw) > 0 {
 		return sm.Raw, nil

From dd8eb6a8df76d82b5dc22f378640c33421ea0691 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Mon, 1 Dec 2014 17:32:31 -0800
Subject: [PATCH 082/165] configuration.Parse should take io.Reader

---
 configuration/configuration.go      | 20 +++++++++++++++++---
 configuration/configuration_test.go | 24 ++++++++++++------------
 2 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/configuration/configuration.go b/configuration/configuration.go
index 85d74d95..2d7e476b 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -2,6 +2,8 @@ package configuration
 
 import (
 	"fmt"
+	"io"
+	"io/ioutil"
 	"os"
 	"regexp"
 	"strconv"
@@ -21,6 +23,13 @@ type Configuration struct {
 
 	// Storage is the configuration for the registry's storage driver
 	Storage Storage `yaml:"storage"`
+
+	// HTTP contains configuration parameters for the registry's http
+	// interface.
+	HTTP struct {
+		// Addr specifies the bind address for the registry instance.
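+		// The value is passed directly to http.ListenAndServe, for
+		// example "localhost:5000".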
+ Addr string `yaml:"addr"` + } `yaml:"http"` } // v0_1Configuration is a Version 0.1 Configuration struct @@ -178,16 +187,21 @@ type Parameters map[string]string // following the scheme below: // Configuration.Abc may be replaced by the value of REGISTRY_ABC, // Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth -func Parse(in []byte) (*Configuration, error) { +func Parse(rd io.Reader) (*Configuration, error) { + in, err := ioutil.ReadAll(rd) + if err != nil { + return nil, err + } + var untypedConfig struct { Version Version } var config *Configuration - err := yaml.Unmarshal(in, &untypedConfig) - if err != nil { + if err := yaml.Unmarshal(in, &untypedConfig); err != nil { return nil, err } + if untypedConfig.Version == "" { return nil, fmt.Errorf("Please specify a configuration version. Current version is %s", CurrentVersion) } diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 31d15b7a..0e227653 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -1,12 +1,12 @@ package configuration import ( + "bytes" "os" "testing" - "gopkg.in/yaml.v2" - . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" ) // Hook up gocheck into the "go test" runner @@ -72,14 +72,14 @@ func (suite *ConfigSuite) SetUpTest(c *C) { func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) - config, err := Parse(configBytes) + config, err := Parse(bytes.NewReader(configBytes)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseSimple validates that configYamlV0_1 can be parsed into a struct matching configStruct func (suite *ConfigSuite) TestParseSimple(c *C) { - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -89,7 +89,7 @@ func (suite *ConfigSuite) TestParseSimple(c *C) { func (suite *ConfigSuite) TestParseInmemory(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} - config, err := Parse([]byte(inmemoryConfigYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -100,7 +100,7 @@ func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { os.Setenv("REGISTRY_STORAGE", "s3") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -117,7 +117,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -129,7 +129,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { os.Setenv("REGISTRY_STORAGE", "inmemory") - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -144,7 +144,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { os.Setenv("REGISTRY_STORAGE", "filesystem") 
os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -154,7 +154,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { os.Setenv("REGISTRY_LOGLEVEL", "info") - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -166,7 +166,7 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { os.Setenv("REGISTRY_LOGLEVEL", "error") - config, err := Parse([]byte(configYamlV0_1)) + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -177,7 +177,7 @@ func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) - _, err = Parse(configBytes) + _, err = Parse(bytes.NewReader(configBytes)) c.Assert(err, NotNil) } From 33016268302360f4fedf7753b830365c6ded36c4 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 1 Dec 2014 17:36:20 -0800 Subject: [PATCH 083/165] Add registry main cmd --- cmd/registry/config.yml | 5 +++ cmd/registry/main.go | 85 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 cmd/registry/config.yml create mode 100644 cmd/registry/main.go diff --git a/cmd/registry/config.yml b/cmd/registry/config.yml new file mode 100644 index 00000000..2f9ad0a2 --- /dev/null +++ b/cmd/registry/config.yml @@ -0,0 +1,5 @@ +version: 0.1 +loglevel: debug +storage: inmemory +http: + addr: localhost:5000 \ No newline at end of file diff --git a/cmd/registry/main.go b/cmd/registry/main.go new file mode 100644 index 00000000..150c7d6b --- /dev/null +++ b/cmd/registry/main.go @@ -0,0 +1,85 @@ +package main + +import ( + "flag" + "fmt" + "net/http" + _ "net/http/pprof" + "os" + + "github.com/gorilla/handlers" + + log "github.com/Sirupsen/logrus" + + "github.com/docker/docker-registry" + "github.com/docker/docker-registry/configuration" + _ "github.com/docker/docker-registry/storagedriver/filesystem" + _ "github.com/docker/docker-registry/storagedriver/inmemory" + _ "github.com/docker/docker-registry/storagedriver/s3" +) + +func main() { + flag.Usage = usage + flag.Parse() + + config, err := resolveConfiguration() + if err != nil { + fatalf("configuration error: %v", err) + } + + app := registry.NewApp(*config) + handler := handlers.CombinedLoggingHandler(os.Stdout, app) + log.SetLevel(logLevel(config.Loglevel)) + + log.Infof("listening on %v", config.HTTP.Addr) + if err := http.ListenAndServe(config.HTTP.Addr, handler); err != nil { + log.Fatalln(err) + } +} + +func usage() { + fmt.Fprintln(os.Stderr, "usage:", os.Args[0], "") + flag.PrintDefaults() +} + +func fatalf(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, format+"\n", args...) 
+ usage() + os.Exit(1) +} + +func resolveConfiguration() (*configuration.Configuration, error) { + var configurationPath string + + if flag.NArg() > 0 { + configurationPath = flag.Arg(0) + } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { + configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") + } + + if configurationPath == "" { + return nil, fmt.Errorf("configuration path unspecified") + } + + fp, err := os.Open(configurationPath) + if err != nil { + return nil, err + } + + config, err := configuration.Parse(fp) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) + } + + return config, nil +} + +func logLevel(level configuration.Loglevel) log.Level { + l, err := log.ParseLevel(string(level)) + if err != nil { + log.Warnf("error parsing level %q: %v", level, err) + l = log.InfoLevel + } + + return l +} From 17b32e0aa0f944d4a4e62ed8697859f8c3a7be6f Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 1 Dec 2014 17:40:14 -0800 Subject: [PATCH 084/165] Add TODO about manifest tampering test --- api_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api_test.go b/api_test.go index cce4a251..41f3de69 100644 --- a/api_test.go +++ b/api_test.go @@ -250,6 +250,10 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("should have received two invalid digest errors: %v", respErrs) } + // TODO(stevvooe): Add a test case where we take a mostly valid registry, + // tamper with the content and ensure that we get a unverified manifest + // error. + // Push 2 random layers expectedLayers := make(map[digest.Digest]io.ReadSeeker) From 54c0290cda67b17d69aaf7c21b9808032165e7aa Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 1 Dec 2014 18:25:10 -0800 Subject: [PATCH 085/165] StorageDrivers should exit non-zero on error (closes #803) --- cmd/registry-storagedriver-filesystem/main.go | 6 +++++- cmd/registry-storagedriver-inmemory/main.go | 5 ++++- cmd/registry-storagedriver-s3/main.go | 6 +++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/cmd/registry-storagedriver-filesystem/main.go b/cmd/registry-storagedriver-filesystem/main.go index 8a5fc93c..5ea1eb70 100644 --- a/cmd/registry-storagedriver-filesystem/main.go +++ b/cmd/registry-storagedriver-filesystem/main.go @@ -4,6 +4,8 @@ import ( "encoding/json" "os" + "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/storagedriver/filesystem" "github.com/docker/docker-registry/storagedriver/ipc" ) @@ -17,5 +19,7 @@ func main() { panic(err) } - ipc.StorageDriverServer(filesystem.FromParameters(parameters)) + if err := ipc.StorageDriverServer(filesystem.FromParameters(parameters)); err != nil { + logrus.Fatalln(err) + } } diff --git a/cmd/registry-storagedriver-inmemory/main.go b/cmd/registry-storagedriver-inmemory/main.go index 999c05d7..77b1c530 100644 --- a/cmd/registry-storagedriver-inmemory/main.go +++ b/cmd/registry-storagedriver-inmemory/main.go @@ -1,6 +1,7 @@ package main import ( + "github.com/Sirupsen/logrus" "github.com/docker/docker-registry/storagedriver/inmemory" "github.com/docker/docker-registry/storagedriver/ipc" ) @@ -8,5 +9,7 @@ import ( // An out-of-process inmemory driver, intended to be run by ipc.NewDriverClient // This exists primarily for example and testing purposes func main() { - ipc.StorageDriverServer(inmemory.New()) + if err := ipc.StorageDriverServer(inmemory.New()); err != nil { + logrus.Fatalln(err) + } } diff --git a/cmd/registry-storagedriver-s3/main.go b/cmd/registry-storagedriver-s3/main.go index aa5a1180..21192a0f 100644 --- 
a/cmd/registry-storagedriver-s3/main.go +++ b/cmd/registry-storagedriver-s3/main.go @@ -4,6 +4,8 @@ import ( "encoding/json" "os" + "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/storagedriver/ipc" "github.com/docker/docker-registry/storagedriver/s3" ) @@ -22,5 +24,7 @@ func main() { panic(err) } - ipc.StorageDriverServer(driver) + if err := ipc.StorageDriverServer(driver); err != nil { + logrus.Fatalln(err) + } } From 4054cd3e73756d7c8de305e159912c28361f7603 Mon Sep 17 00:00:00 2001 From: Ahmet Alp Balkan Date: Tue, 25 Nov 2014 10:40:24 -0800 Subject: [PATCH 086/165] Azure storage driver implementation Signed-off-by: Ahmet Alp Balkan --- cmd/registry-storagedriver-azure/main.go | 29 ++ storagedriver/azure/azure.go | 352 +++++++++++++++++++++++ storagedriver/azure/azure_test.go | 65 +++++ 3 files changed, 446 insertions(+) create mode 100644 cmd/registry-storagedriver-azure/main.go create mode 100644 storagedriver/azure/azure.go create mode 100644 storagedriver/azure/azure_test.go diff --git a/cmd/registry-storagedriver-azure/main.go b/cmd/registry-storagedriver-azure/main.go new file mode 100644 index 00000000..b9944342 --- /dev/null +++ b/cmd/registry-storagedriver-azure/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "encoding/json" + "os" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/storagedriver/azure" + "github.com/docker/docker-registry/storagedriver/ipc" +) + +// An out-of-process Azure Storage driver, intended to be run by ipc.NewDriverClient +func main() { + parametersBytes := []byte(os.Args[1]) + var parameters map[string]string + err := json.Unmarshal(parametersBytes, ¶meters) + if err != nil { + panic(err) + } + + driver, err := azure.FromParameters(parameters) + if err != nil { + panic(err) + } + + if err := ipc.StorageDriverServer(driver); err != nil { + log.Fatalln("driver error:", err) + } +} diff --git a/storagedriver/azure/azure.go b/storagedriver/azure/azure.go new file mode 100644 index 00000000..ba716841 --- /dev/null +++ b/storagedriver/azure/azure.go @@ -0,0 +1,352 @@ +// Package azure provides a storagedriver.StorageDriver implementation to +// store blobs in Microsoft Azure Blob Storage Service. +package azure + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/factory" + + azure "github.com/MSOpenTech/azure-sdk-for-go/clients/storage" +) + +const driverName = "azure" + +const ( + paramAccountName = "accountname" + paramAccountKey = "accountkey" + paramContainer = "container" +) + +// Driver is a storagedriver.StorageDriver implementation backed by +// Microsoft Azure Blob Storage Service. +type Driver struct { + client *azure.BlobStorageClient + container string +} + +func init() { + factory.Register(driverName, &azureDriverFactory{}) +} + +type azureDriverFactory struct{} + +func (factory *azureDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +// FromParameters constructs a new Driver with a given parameters map. 
+func FromParameters(parameters map[string]string) (*Driver, error) { + accountName, ok := parameters[paramAccountName] + if !ok { + return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + } + + accountKey, ok := parameters[paramAccountKey] + if !ok { + return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + } + + container, ok := parameters[paramContainer] + if !ok { + return nil, fmt.Errorf("No %s parameter provided", paramContainer) + } + + return New(accountName, accountKey, container) +} + +// New constructs a new Driver with the given Azure Storage Account credentials +func New(accountName, accountKey, container string) (*Driver, error) { + api, err := azure.NewBasicClient(accountName, accountKey) + if err != nil { + return nil, err + } + + blobClient := api.GetBlobService() + + // Create registry container + if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { + return nil, err + } + + return &Driver{ + client: blobClient, + container: container}, nil +} + +// Implement the storagedriver.StorageDriver interface. + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *Driver) GetContent(path string) ([]byte, error) { + blob, err := d.client.GetBlob(d.container, path) + if err != nil { + if is404(err) { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + return ioutil.ReadAll(blob) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *Driver) PutContent(path string, contents []byte) error { + return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { + if ok, err := d.client.BlobExists(d.container, path); err != nil { + return nil, err + } else if !ok { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + size, err := d.CurrentSize(path) + if err != nil { + return nil, err + } + + if offset >= size { + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + bytesRange := fmt.Sprintf("%v-", offset) + resp, err := d.client.GetBlobRange(d.container, path, bytesRange) + if err != nil { + return nil, err + } + return resp, nil +} + +// WriteStream stores the contents of the provided io.ReadCloser at a location +// designated by the given path. 
+func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { + var ( + lastBlockNum int + resumableOffset uint64 + blocks []azure.Block + ) + + if blobExists, err := d.client.BlobExists(d.container, path); err != nil { + return err + } else if !blobExists { // new blob + lastBlockNum = 0 + resumableOffset = 0 + } else { // append + if parts, err := d.client.GetBlockList(d.container, path, azure.BlockListTypeCommitted); err != nil { + return err + } else if len(parts.CommittedBlocks) == 0 { + lastBlockNum = 0 + resumableOffset = 0 + } else { + lastBlock := parts.CommittedBlocks[len(parts.CommittedBlocks)-1] + if lastBlockNum, err = blockNum(lastBlock.Name); err != nil { + return fmt.Errorf("Cannot parse block name as number '%s': %s", lastBlock.Name, err.Error()) + } + + var totalSize uint64 + for _, v := range parts.CommittedBlocks { + blocks = append(blocks, azure.Block{ + Id: v.Name, + Status: azure.BlockStatusCommitted}) + totalSize += uint64(v.Size) + } + + // NOTE: Azure driver currently supports only append mode (resumable + // index is exactly where the committed blocks of the blob end). + // In order to support writing to offsets other than last index, + // adjacent blocks overlapping with the [offset:offset+size] area + // must be fetched, splitted and should be overwritten accordingly. + // As the current use of this method is append only, that implementation + // is omitted. + resumableOffset = totalSize + } + } + + if offset != resumableOffset { + return storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + // Put content + buf := make([]byte, azure.MaxBlobBlockSize) + for { + // Read chunks of exactly size N except the last chunk to + // maximize block size and minimize block count. + n, err := io.ReadFull(reader, buf) + if err == io.EOF { + break + } + + data := buf[:n] + blockID := toBlockID(lastBlockNum + 1) + if err = d.client.PutBlock(d.container, path, blockID, data); err != nil { + return err + } + blocks = append(blocks, azure.Block{ + Id: blockID, + Status: azure.BlockStatusLatest}) + lastBlockNum++ + } + + // Commit block list + return d.client.PutBlockList(d.container, path, blocks) +} + +// CurrentSize retrieves the curernt size in bytes of the object at the given +// path. +func (d *Driver) CurrentSize(path string) (uint64, error) { + props, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return 0, err + } + return props.ContentLength, nil +} + +// List returns a list of the objects that are direct descendants of the given +// path. +func (d *Driver) List(path string) ([]string, error) { + if path == "/" { + path = "" + } + + blobs, err := d.listBlobs(d.container, path) + if err != nil { + return blobs, err + } + + list := directDescendants(blobs, path) + return list, nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *Driver) Move(sourcePath string, destPath string) error { + sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath) + err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + if err != nil { + if is404(err) { + return storagedriver.PathNotFoundError{Path: sourcePath} + } + return err + } + + return d.client.DeleteBlob(d.container, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+func (d *Driver) Delete(path string) error {
+	ok, err := d.client.DeleteBlobIfExists(d.container, path)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil // was a blob and deleted, return
+	}
+
+	// Not a blob, see if path is a virtual container with blobs
+	blobs, err := d.listBlobs(d.container, path)
+	if err != nil {
+		return err
+	}
+
+	for _, b := range blobs {
+		if err = d.client.DeleteBlob(d.container, b); err != nil {
+			return err
+		}
+	}
+
+	if len(blobs) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+	return nil
+}
+
+// directDescendants will find direct descendants (blobs or virtual containers)
+// from a list of blob paths and will return their full paths. Elements in the
+// blobs list must be prefixed with a "/".
+//
+// Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} are
+// {"/foo", "/bar"} and direct descendants of "bar" are {"/bar/1", "/bar/2"}
+func directDescendants(blobs []string, prefix string) []string {
+	if !strings.HasPrefix(prefix, "/") { // add leading '/'
+		prefix = "/" + prefix
+	}
+	if !strings.HasSuffix(prefix, "/") { // containerify the path
+		prefix += "/"
+	}
+
+	out := make(map[string]bool)
+	for _, b := range blobs {
+		if strings.HasPrefix(b, prefix) {
+			rel := b[len(prefix):]
+			c := strings.Count(rel, "/")
+			if c == 0 {
+				out[b] = true
+			} else {
+				out[prefix+rel[:strings.Index(rel, "/")]] = true
+			}
+		}
+	}
+
+	var keys []string
+	for k := range out {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (d *Driver) listBlobs(container, virtPath string) ([]string, error) {
+	if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path
+		virtPath += "/"
+	}
+
+	out := []string{}
+	marker := ""
+	for {
+		resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{
+			Marker: marker,
+			Prefix: virtPath,
+		})
+
+		if err != nil {
+			return out, err
+		}
+
+		for _, b := range resp.Blobs {
+			out = append(out, b.Name)
+		}
+
+		if len(resp.Blobs) == 0 || resp.NextMarker == "" {
+			break
+		}
+		marker = resp.NextMarker
+	}
+	return out, nil
+}
+
+func is404(err error) bool {
+	e, ok := err.(azure.StorageServiceError)
+	return ok && e.StatusCode == 404
+}
+
+func blockNum(b64Name string) (int, error) {
+	s, err := base64.StdEncoding.DecodeString(b64Name)
+	if err != nil {
+		return 0, err
+	}
+
+	return strconv.Atoi(string(s))
+}
+
+func toBlockID(i int) string {
+	return base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(i)))
+}
diff --git a/storagedriver/azure/azure_test.go b/storagedriver/azure/azure_test.go
new file mode 100644
index 00000000..888d1165
--- /dev/null
+++ b/storagedriver/azure/azure_test.go
@@ -0,0 +1,65 @@
+package azure
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker-registry/storagedriver"
+	"github.com/docker/docker-registry/storagedriver/testsuites"
+	. "gopkg.in/check.v1"
+)
+
+const (
+	envAccountName = "AZURE_STORAGE_ACCOUNT_NAME"
+	envAccountKey  = "AZURE_STORAGE_ACCOUNT_KEY"
+	envContainer   = "AZURE_STORAGE_CONTAINER"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { TestingT(t) } + +func init() { + var ( + accountName string + accountKey string + container string + ) + + config := []struct { + env string + value *string + }{ + {envAccountName, &accountName}, + {envAccountKey, &accountKey}, + {envContainer, &container}, + } + + missing := []string{} + for _, v := range config { + *v.value = os.Getenv(v.env) + if *v.value == "" { + missing = append(missing, v.env) + } + } + + azureDriverConstructor := func() (storagedriver.StorageDriver, error) { + return New(accountName, accountKey, container) + } + + // Skip Azure storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if len(missing) > 0 { + return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) + } + return "" + } + + testsuites.RegisterInProcessSuite(azureDriverConstructor, skipCheck) + testsuites.RegisterIPCSuite(driverName, map[string]string{ + paramAccountName: accountName, + paramAccountKey: accountKey, + paramContainer: container, + }, skipCheck) +} From 66107df1afd3d1782ab9a73eccd7b72e4e1bcaaf Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 2 Dec 2014 19:01:00 -0800 Subject: [PATCH 087/165] Use int64 for ReadStream and WriteStream offsets This change brings the storagedriver API in line with the Go standard library's use of int64 for offsets. The main benefit is simplicity in interfacing with the io library reducing the number of type conversions in simple code. --- storagedriver/azure/azure.go | 12 +++++------ storagedriver/filesystem/driver.go | 10 +++++---- storagedriver/inmemory/driver.go | 6 +++--- storagedriver/ipc/client.go | 4 ++-- storagedriver/ipc/server.go | 6 +++--- storagedriver/s3/s3.go | 18 ++++++++-------- storagedriver/storagedriver.go | 6 +++--- storagedriver/testsuites/testsuites.go | 30 +++++++++++++------------- 8 files changed, 47 insertions(+), 45 deletions(-) diff --git a/storagedriver/azure/azure.go b/storagedriver/azure/azure.go index ba716841..489a6348 100644 --- a/storagedriver/azure/azure.go +++ b/storagedriver/azure/azure.go @@ -103,7 +103,7 @@ func (d *Driver) PutContent(path string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { +func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if !ok { @@ -115,7 +115,7 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { return nil, err } - if offset >= size { + if offset >= int64(size) { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -129,10 +129,10 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. 
-func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { +func (d *Driver) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { var ( lastBlockNum int - resumableOffset uint64 + resumableOffset int64 blocks []azure.Block ) @@ -153,12 +153,12 @@ func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadClo return fmt.Errorf("Cannot parse block name as number '%s': %s", lastBlock.Name, err.Error()) } - var totalSize uint64 + var totalSize int64 for _, v := range parts.CommittedBlocks { blocks = append(blocks, azure.Block{ Id: v.Name, Status: azure.BlockStatusCommitted}) - totalSize += uint64(v.Size) + totalSize += int64(v.Size) } // NOTE: Azure driver currently supports only append mode (resumable diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go index a4b2e688..3fbfcdf6 100644 --- a/storagedriver/filesystem/driver.go +++ b/storagedriver/filesystem/driver.go @@ -80,7 +80,7 @@ func (d *Driver) PutContent(subPath string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { +func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { file, err := os.OpenFile(d.subPath(path), os.O_RDONLY, 0644) if err != nil { return nil, storagedriver.PathNotFoundError{Path: path} @@ -100,7 +100,7 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. -func (d *Driver) WriteStream(subPath string, offset, size uint64, reader io.ReadCloser) error { +func (d *Driver) WriteStream(subPath string, offset, size int64, reader io.ReadCloser) error { defer reader.Close() resumableOffset, err := d.CurrentSize(subPath) @@ -108,7 +108,7 @@ func (d *Driver) WriteStream(subPath string, offset, size uint64, reader io.Read return err } - if offset > resumableOffset { + if offset > int64(resumableOffset) { return storagedriver.InvalidOffsetError{Path: subPath, Offset: offset} } @@ -131,13 +131,15 @@ func (d *Driver) WriteStream(subPath string, offset, size uint64, reader io.Read } defer file.Close() + // TODO(sday): Use Seek + Copy here. + buf := make([]byte, 32*1024) for { bytesRead, er := reader.Read(buf) if bytesRead > 0 { bytesWritten, ew := file.WriteAt(buf[0:bytesRead], int64(offset)) if bytesWritten > 0 { - offset += uint64(bytesWritten) + offset += int64(bytesWritten) } if ew != nil { err = ew diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go index 98b068e9..3231b017 100644 --- a/storagedriver/inmemory/driver.go +++ b/storagedriver/inmemory/driver.go @@ -61,7 +61,7 @@ func (d *Driver) PutContent(path string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { +func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() contents, err := d.GetContent(path) @@ -79,7 +79,7 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. 
-func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { +func (d *Driver) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { defer reader.Close() d.mutex.RLock() defer d.mutex.RUnlock() @@ -89,7 +89,7 @@ func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadClo return err } - if offset > resumableOffset { + if offset > int64(resumableOffset) { return storagedriver.InvalidOffsetError{Path: path, Offset: offset} } diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go index c77797eb..7e52a084 100644 --- a/storagedriver/ipc/client.go +++ b/storagedriver/ipc/client.go @@ -234,7 +234,7 @@ func (driver *StorageDriverClient) PutContent(path string, contents []byte) erro // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.ReadCloser, error) { +func (driver *StorageDriverClient) ReadStream(path string, offset int64) (io.ReadCloser, error) { if err := driver.exited(); err != nil { return nil, err } @@ -261,7 +261,7 @@ func (driver *StorageDriverClient) ReadStream(path string, offset uint64) (io.Re // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. -func (driver *StorageDriverClient) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { +func (driver *StorageDriverClient) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { if err := driver.exited(); err != nil { return err } diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go index 7d1876ca..1c0084f9 100644 --- a/storagedriver/ipc/server.go +++ b/storagedriver/ipc/server.go @@ -100,7 +100,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { case "ReadStream": path, _ := request.Parameters["Path"].(string) // Depending on serialization method, Offset may be convereted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(uint64(0))).Uint() + offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() reader, err := driver.ReadStream(path, offset) var response ReadStreamResponse if err != nil { @@ -115,9 +115,9 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) { case "WriteStream": path, _ := request.Parameters["Path"].(string) // Depending on serialization method, Offset may be convereted to any int/uint type - offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(uint64(0))).Uint() + offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int() // Depending on serialization method, Size may be convereted to any int/uint type - size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(uint64(0))).Uint() + size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int() reader, _ := request.Parameters["Reader"].(io.ReadCloser) err := driver.WriteStream(path, offset, size, reader) response := WriteStreamResponse{ diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go index def03e3e..3d5cd511 100644 --- a/storagedriver/s3/s3.go +++ b/storagedriver/s3/s3.go @@ -17,7 +17,7 @@ const driverName = "s3" // minChunkSize defines the minimum multipart upload chunk size // S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = uint64(5 * 1024 * 1024) 
+const minChunkSize = 5 * 1024 * 1024 // listPartsMax is the largest amount of parts you can request from S3 const listPartsMax = 1000 @@ -120,9 +120,9 @@ func (d *Driver) PutContent(path string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { +func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatUint(offset, 10)+"-") + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") resp, err := d.Bucket.GetResponseWithHeaders(path, headers) if err != nil { @@ -133,22 +133,22 @@ func (d *Driver) ReadStream(path string, offset uint64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. -func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadCloser) error { +func (d *Driver) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { defer reader.Close() - chunkSize := minChunkSize + chunkSize := int64(minChunkSize) for size/chunkSize >= listPartsMax { chunkSize *= 2 } partNumber := 1 - totalRead := uint64(0) + var totalRead int64 multi, parts, err := d.getAllParts(path) if err != nil { return err } - if (offset) > uint64(len(parts))*chunkSize || (offset < size && offset%chunkSize != 0) { + if (offset) > int64(len(parts))*chunkSize || (offset < size && offset%chunkSize != 0) { return storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -161,11 +161,11 @@ func (d *Driver) WriteStream(path string, offset, size uint64, reader io.ReadClo buf := make([]byte, chunkSize) for { bytesRead, err := io.ReadFull(reader, buf) - totalRead += uint64(bytesRead) + totalRead += int64(bytesRead) if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { return err - } else if (uint64(bytesRead) < chunkSize) && totalRead != size { + } else if (int64(bytesRead) < chunkSize) && totalRead != size { break } else { part, err := multi.PutPart(int(partNumber), bytes.NewReader(buf[0:bytesRead])) diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index 1b6c5c00..d257e4b2 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -44,7 +44,7 @@ type StorageDriver interface { // ReadStream retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. - ReadStream(path string, offset uint64) (io.ReadCloser, error) + ReadStream(path string, offset int64) (io.ReadCloser, error) // WriteStream stores the contents of the provided io.ReadCloser at a // location designated by the given path. @@ -52,7 +52,7 @@ type StorageDriver interface { // "size" bytes. // May be used to resume writing a stream by providing a nonzero offset. // The offset must be no larger than the CurrentSize for this path. - WriteStream(path string, offset, size uint64, readCloser io.ReadCloser) error + WriteStream(path string, offset, size int64, readCloser io.ReadCloser) error // CurrentSize retrieves the curernt size in bytes of the object at the // given path. @@ -86,7 +86,7 @@ func (err PathNotFoundError) Error() string { // invalid offset. 
type InvalidOffsetError struct { Path string - Offset uint64 + Offset int64 } func (err InvalidOffsetError) Error() string { diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 61756667..c2604f4f 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -173,7 +173,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) - chunkSize := uint64(10 * 1024 * 1024) + chunkSize := int64(10 * 1024 * 1024) contentsChunk1 := []byte(randomString(chunkSize)) contentsChunk2 := []byte(randomString(chunkSize)) @@ -186,19 +186,19 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { offset, err := suite.StorageDriver.CurrentSize(filename) c.Assert(err, check.IsNil) - if offset > chunkSize { + if int64(offset) > chunkSize { c.Fatalf("Offset too large, %d > %d", offset, chunkSize) } - err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize]))) + err = suite.StorageDriver.WriteStream(filename, int64(offset), 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize]))) c.Assert(err, check.IsNil) offset, err = suite.StorageDriver.CurrentSize(filename) c.Assert(err, check.IsNil) - if offset > 2*chunkSize { + if int64(offset) > 2*chunkSize { c.Fatalf("Offset too large, %d > %d", offset, 2*chunkSize) } - err = suite.StorageDriver.WriteStream(filename, offset, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:]))) + err = suite.StorageDriver.WriteStream(filename, int64(offset), 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:]))) c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(filename) @@ -212,7 +212,7 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { filename := randomString(32) defer suite.StorageDriver.Delete(filename) - chunkSize := uint64(32) + chunkSize := int64(32) contentsChunk1 := []byte(randomString(chunkSize)) contentsChunk2 := []byte(randomString(chunkSize)) @@ -260,13 +260,13 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { // TestList checks the returned list of keys after populating a directory tree. 
func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := "/" + randomString(uint64(8+rand.Intn(8))) + rootDirectory := "/" + randomString(int64(8+rand.Intn(8))) defer suite.StorageDriver.Delete(rootDirectory) - parentDirectory := rootDirectory + "/" + randomString(uint64(8+rand.Intn(8))) + parentDirectory := rootDirectory + "/" + randomString(int64(8+rand.Intn(8))) childFiles := make([]string, 50) for i := 0; i < len(childFiles); i++ { - childFile := parentDirectory + "/" + randomString(uint64(8+rand.Intn(8))) + childFile := parentDirectory + "/" + randomString(int64(8+rand.Intn(8))) childFiles[i] = childFile err := suite.StorageDriver.PutContent(childFile, []byte(randomString(32))) c.Assert(err, check.IsNil) @@ -388,7 +388,7 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { doneChan := make(chan struct{}) - testStream := func(size int) { + testStream := func(size int64) { suite.testFileStreams(c, size) doneChan <- struct{}{} } @@ -406,7 +406,7 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { } -func (suite *DriverSuite) testFileStreams(c *check.C, size int) { +func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tf, err := ioutil.TempFile("", "tf") c.Assert(err, check.IsNil) defer os.Remove(tf.Name()) @@ -414,7 +414,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int) { tfName := path.Base(tf.Name()) defer suite.StorageDriver.Delete(tfName) - contents := []byte(randomString(uint64(size))) + contents := []byte(randomString(size)) _, err = tf.Write(contents) c.Assert(err, check.IsNil) @@ -422,7 +422,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int) { tf.Sync() tf.Seek(0, os.SEEK_SET) - err = suite.StorageDriver.WriteStream(tfName, 0, uint64(size), tf) + err = suite.StorageDriver.WriteStream(tfName, 0, size, tf) c.Assert(err, check.IsNil) reader, err := suite.StorageDriver.ReadStream(tfName, 0) @@ -450,7 +450,7 @@ func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { defer suite.StorageDriver.Delete(filename) - err := suite.StorageDriver.WriteStream(filename, 0, uint64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents))) + err := suite.StorageDriver.WriteStream(filename, 0, int64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents))) c.Assert(err, check.IsNil) reader, err := suite.StorageDriver.ReadStream(filename, 0) @@ -465,7 +465,7 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c var pathChars = []byte("abcdefghijklmnopqrstuvwxyz") -func randomString(length uint64) string { +func randomString(length int64) string { b := make([]byte, length) for i := range b { b[i] = pathChars[rand.Intn(len(pathChars))] From b047c92e1cdb76635ff19d80b97c3d40e1901bdf Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 2 Dec 2014 20:43:31 -0800 Subject: [PATCH 088/165] Use sync.WaitGroup to control concurrent tests --- storagedriver/testsuites/testsuites.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index c2604f4f..92f7454d 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -7,6 +7,7 @@ import ( "os" "path" "sort" + "sync" "testing" "github.com/docker/docker-registry/storagedriver" @@ -386,13 +387,14 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { 
c.Skip("Need to fix out-of-process concurrency") } - doneChan := make(chan struct{}) + var wg sync.WaitGroup testStream := func(size int64) { + defer wg.Done() suite.testFileStreams(c, size) - doneChan <- struct{}{} } + wg.Add(6) go testStream(8 * 1024 * 1024) go testStream(4 * 1024 * 1024) go testStream(2 * 1024 * 1024) @@ -400,10 +402,7 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { go testStream(1024) go testStream(64) - for i := 0; i < 6; i++ { - <-doneChan - } - + wg.Wait() } func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { From ac660e72bfbc13cdf12b46252f161b5c6c3caac0 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 2 Dec 2014 21:00:42 -0800 Subject: [PATCH 089/165] Replace StorageLayer.CurrentSize interface call with Stat To support single-flight Size and ModTime queries against backend storage file, we are replacing the CurrentSize call with a Stat call. A FileInfo interface is provided for backends to provide a type, with a default implementation called FileInfoInternal, for use by driver implementations. More work needs to follow this change to update all the driver implementations. --- storagedriver/fileinfo.go | 79 ++++++++++++++++++++++++++++++++++ storagedriver/storagedriver.go | 7 ++- 2 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 storagedriver/fileinfo.go diff --git a/storagedriver/fileinfo.go b/storagedriver/fileinfo.go new file mode 100644 index 00000000..82e3d546 --- /dev/null +++ b/storagedriver/fileinfo.go @@ -0,0 +1,79 @@ +package storagedriver + +import "time" + +// FileInfo returns information about a given path. Inspired by os.FileInfo, +// it elides the base name method for a full path instead. +type FileInfo interface { + // Path provides the full path of the target of this file info. + Path() string + + // Size returns current length in bytes of the file. The return value can + // be used to write to the end of the file at path. The value is + // meaningless if IsDir returns true. + Size() int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime() time.Time + + // IsDir returns true if the path is a directory. + IsDir() bool +} + +// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal +// should only be used by storagedriver implementations. They should moved to +// a "driver" package, similar to database/sql. + +// FileInfoFields provides the exported fields for implementing FileInfo +// interface in storagedriver implementations. It should be used with +// InternalFileInfo. +type FileInfoFields struct { + // Path provides the full path of the target of this file info. + Path string + + // Size is current length in bytes of the file. The value of this field + // can be used to write to the end of the file at path. The value is + // meaningless if IsDir is set to true. + Size int64 + + // ModTime returns the modification time for the file. For backends that + // don't have a modification time, the creation time should be returned. + ModTime time.Time + + // IsDir returns true if the path is a directory. + IsDir bool +} + +// FileInfoInternal implements the FileInfo interface. This should only be +// used by storagedriver implementations that don't have a specialized +// FileInfo type. 
+type FileInfoInternal struct {
+	FileInfoFields
+}
+
+var _ FileInfo = FileInfoInternal{}
+var _ FileInfo = &FileInfoInternal{}
+
+// Path provides the full path of the target of this file info.
+func (fi FileInfoInternal) Path() string {
+	return fi.FileInfoFields.Path
+}
+
+// Size returns current length in bytes of the file. The return value can
+// be used to write to the end of the file at path. The value is
+// meaningless if IsDir returns true.
+func (fi FileInfoInternal) Size() int64 {
+	return fi.FileInfoFields.Size
+}
+
+// ModTime returns the modification time for the file. For backends that
+// don't have a modification time, the creation time should be returned.
+func (fi FileInfoInternal) ModTime() time.Time {
+	return fi.FileInfoFields.ModTime
+}
+
+// IsDir returns true if the path is a directory.
+func (fi FileInfoInternal) IsDir() bool {
+	return fi.FileInfoFields.IsDir
+}
diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go
index d257e4b2..754c8bb6 100644
--- a/storagedriver/storagedriver.go
+++ b/storagedriver/storagedriver.go
@@ -54,10 +54,9 @@ type StorageDriver interface {
 	// The offset must be no larger than the CurrentSize for this path.
 	WriteStream(path string, offset, size int64, readCloser io.ReadCloser) error
 
-	// CurrentSize retrieves the curernt size in bytes of the object at the
-	// given path.
-	// It should be safe to read or write anywhere up to this point.
-	CurrentSize(path string) (uint64, error)
+	// Stat retrieves the FileInfo for the given path, including the current
+	// size in bytes and the creation time.
+	Stat(path string) (FileInfo, error)
 
 	// List returns a list of the objects that are direct descendants of the
 	//given path.
From 2e3ecdca37622933742e9ac405c96d275ef4ed69 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 2 Dec 2014 21:47:28 -0800
Subject: [PATCH 090/165] Remove size argument and use io.Reader for
 StorageDriver.WriteStream

We are changing the rpc call for WriteStream to not require the size
argument, opting to drive the process with io.Reader. The main issue was
that io.Reader may return io.EOF before reaching size, making the error
handling around this condition for callers more complex. To complement
this, WriteStream now returns the number of successfully written bytes.

The method no longer requires an io.ReadCloser, opting to require just an
io.Reader. This keeps the reader under the control of the caller, which
provides more flexibility.

This also begins to address some of the problems described in #791.
---
 storagedriver/storagedriver.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go
index 754c8bb6..339b465a 100644
--- a/storagedriver/storagedriver.go
+++ b/storagedriver/storagedriver.go
@@ -52,7 +52,7 @@ type StorageDriver interface {
 	// "size" bytes.
 	// May be used to resume writing a stream by providing a nonzero offset.
 	// The offset must be no larger than the CurrentSize for this path.
-	WriteStream(path string, offset, size int64, readCloser io.ReadCloser) error
+	WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error)
 
 	// Stat retrieves the FileInfo for the given path, including the current
 	// size in bytes and the creation time.
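For orientation between these two patches, here is a caller-side sketch of
the revised contract. The helper below is illustrative only and is not part
of this series; it assumes some value satisfying
storagedriver.StorageDriver, and treating PathNotFoundError as "start from
zero" is likewise an assumed caller policy, not something the interface
mandates.

	package example

	import (
		"io"
		"os"

		"github.com/docker/docker-registry/storagedriver"
	)

	// resumeUpload (hypothetical) restarts an interrupted upload: Stat
	// reports how much of the object the backend already holds, the source
	// is seeked to that point, and WriteStream reports how many bytes were
	// actually committed.
	func resumeUpload(driver storagedriver.StorageDriver, path string, src io.ReadSeeker) (int64, error) {
		var offset int64

		fi, err := driver.Stat(path)
		switch err.(type) {
		case nil:
			offset = fi.Size() // resume from the end of the partial object
		case storagedriver.PathNotFoundError:
			// Nothing written yet; start from the beginning.
		default:
			return 0, err
		}

		if _, err := src.Seek(offset, os.SEEK_SET); err != nil {
			return 0, err
		}

		return driver.WriteStream(path, offset, src)
	}

Because WriteStream now returns the byte count, a caller can detect a short
write directly rather than reconciling a pre-declared size against io.EOF.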
From 2037b1d6bf20e380887a05151e5521319c004548 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 3 Dec 2014 16:37:46 -0800 Subject: [PATCH 091/165] Update testsuite with storagedriver interface changes This change updates the testsuite to migrate to the new driver interface. This includes the new Stat call, changes to int64 over uint64 and the changes to the WriteStream signature. Several test cases have been added to vet implementations against various assumptions. --- storagedriver/testsuites/testsuites.go | 180 +++++++++++++++++++------ 1 file changed, 138 insertions(+), 42 deletions(-) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 92f7454d..6a51cd19 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -9,6 +9,7 @@ import ( "sort" "sync" "testing" + "time" "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/ipc" @@ -168,45 +169,6 @@ func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { suite.writeReadCompareStreams(c, filename, contents) } -// TestContinueStreamAppend tests that a stream write can be appended to without -// corrupting the data. -func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { - filename := randomString(32) - defer suite.StorageDriver.Delete(filename) - - chunkSize := int64(10 * 1024 * 1024) - - contentsChunk1 := []byte(randomString(chunkSize)) - contentsChunk2 := []byte(randomString(chunkSize)) - contentsChunk3 := []byte(randomString(chunkSize)) - - fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) - - err := suite.StorageDriver.WriteStream(filename, 0, 3*chunkSize, ioutil.NopCloser(bytes.NewReader(contentsChunk1))) - c.Assert(err, check.IsNil) - - offset, err := suite.StorageDriver.CurrentSize(filename) - c.Assert(err, check.IsNil) - if int64(offset) > chunkSize { - c.Fatalf("Offset too large, %d > %d", offset, chunkSize) - } - err = suite.StorageDriver.WriteStream(filename, int64(offset), 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:2*chunkSize]))) - c.Assert(err, check.IsNil) - - offset, err = suite.StorageDriver.CurrentSize(filename) - c.Assert(err, check.IsNil) - if int64(offset) > 2*chunkSize { - c.Fatalf("Offset too large, %d > %d", offset, 2*chunkSize) - } - - err = suite.StorageDriver.WriteStream(filename, int64(offset), 3*chunkSize, ioutil.NopCloser(bytes.NewReader(fullContents[offset:]))) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, fullContents) -} - // TestReadStreamWithOffset tests that the appropriate data is streamed when // reading with a given offset. func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { @@ -246,10 +208,90 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { readContents, err = ioutil.ReadAll(reader) c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contentsChunk3) } +// TestContinueStreamAppend tests that a stream write can be appended to without +// corrupting the data. 
+func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) {
+	filename := randomString(32)
+	defer suite.StorageDriver.Delete(filename)
+
+	chunkSize := int64(10 * 1024 * 1024)
+
+	contentsChunk1 := []byte(randomString(chunkSize))
+	contentsChunk2 := []byte(randomString(chunkSize))
+	contentsChunk3 := []byte(randomString(chunkSize))
+	contentsChunk4 := []byte(randomString(chunkSize))
+	zeroChunk := make([]byte, int64(chunkSize))
+
+	fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)
+
+	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contentsChunk1))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(contentsChunk1)))
+
+	fi, err := suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1)))
+
+	if fi.Size() > chunkSize {
+		c.Fatalf("Offset too large, %d > %d", fi.Size(), chunkSize)
+	}
+	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(contentsChunk2)))
+
+	fi, err = suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, 2*chunkSize)
+
+	if fi.Size() > 2*chunkSize {
+		c.Fatalf("Offset too large, %d > %d", fi.Size(), 2*chunkSize)
+	}
+
+	nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():]))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():])))
+
+	received, err := suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(received, check.DeepEquals, fullContents)
+
+	// Writing past size of file extends file (no offset error). We would like
+	// to write chunk 4 one chunk length past chunk 3. It should be successful
+	// and the resulting file will be 5 chunks long, with a chunk of all
+	// zeros.
+
+	fullContents = append(fullContents, zeroChunk...)
+	fullContents = append(fullContents, contentsChunk4...)
+
+	nn, err = suite.StorageDriver.WriteStream(filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4))
+	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, chunkSize)
+
+	fi, err = suite.StorageDriver.Stat(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Size(), check.Equals, int64(len(fullContents)))
+
+	received, err = suite.StorageDriver.GetContent(filename)
+	c.Assert(err, check.IsNil)
+	c.Assert(len(received), check.Equals, len(fullContents))
+	c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk)
+	c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4)
+	c.Assert(received, check.DeepEquals, fullContents)
+
+	// Ensure that negative offsets return the correct error.
+	nn, err = suite.StorageDriver.WriteStream(filename, -1, bytes.NewReader(zeroChunk))
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
+	c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
+	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
+}
+
 // TestReadNonexistentStream tests that reading a stream for a nonexistent path
 // fails.
 func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) {
@@ -379,6 +421,58 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
 	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }
 
+func (suite *DriverSuite) TestStatCall(c *check.C) {
+	content := randomString(4096)
+	dirPath := randomString(32)
+	fileName := randomString(32)
+	filePath := path.Join(dirPath, fileName)
+
+	// Call on non-existent file/dir, check error.
+	fi, err := suite.StorageDriver.Stat(filePath)
+	c.Assert(err, check.NotNil)
+	c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+	c.Assert(fi, check.IsNil)
+
+	err = suite.StorageDriver.PutContent(filePath, []byte(content))
+	c.Assert(err, check.IsNil)
+
+	// Call on regular file, check results
+	start := time.Now().Truncate(time.Second) // truncated for filesystem
+	fi, err = suite.StorageDriver.Stat(filePath)
+	c.Assert(err, check.IsNil)
+	expectedModTime := time.Now()
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, filePath)
+	c.Assert(fi.Size(), check.Equals, int64(len(content)))
+	c.Assert(fi.IsDir(), check.Equals, false)
+
+	if start.After(fi.ModTime()) {
+		c.Fatalf("modtime %s before file created (%v)", fi.ModTime(), start)
+	}
+
+	if fi.ModTime().After(expectedModTime) {
+		c.Fatalf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime)
+	}
+
+	// Call on directory
+	start = time.Now().Truncate(time.Second)
+	fi, err = suite.StorageDriver.Stat(dirPath)
+	c.Assert(err, check.IsNil)
+	expectedModTime = time.Now()
+	c.Assert(fi, check.NotNil)
+	c.Assert(fi.Path(), check.Equals, dirPath)
+	c.Assert(fi.Size(), check.Equals, int64(0))
+	c.Assert(fi.IsDir(), check.Equals, true)
+
+	if start.After(fi.ModTime()) {
+		c.Fatalf("modtime %s before file created (%v)", fi.ModTime(), start)
+	}
+
+	if fi.ModTime().After(expectedModTime) {
+		c.Fatalf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime)
+	}
+}
+
 // TestConcurrentFileStreams checks that multiple *os.File objects can be passed
 // in to WriteStream concurrently without hanging.
 // TODO(bbland): fix this test...
@@ -421,8 +515,9 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
 	tf.Sync()
 	tf.Seek(0, os.SEEK_SET)
 
-	err = suite.StorageDriver.WriteStream(tfName, 0, size, tf)
+	nn, err := suite.StorageDriver.WriteStream(tfName, 0, tf)
 	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, size)
 
 	reader, err := suite.StorageDriver.ReadStream(tfName, 0)
 	c.Assert(err, check.IsNil)
@@ -449,8 +544,9 @@ func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents
 
 func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) {
 	defer suite.StorageDriver.Delete(filename)
 
-	err := suite.StorageDriver.WriteStream(filename, 0, int64(len(contents)), ioutil.NopCloser(bytes.NewReader(contents)))
+	nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contents))
 	c.Assert(err, check.IsNil)
+	c.Assert(nn, check.Equals, int64(len(contents)))
 
 	reader, err := suite.StorageDriver.ReadStream(filename, 0)
 	c.Assert(err, check.IsNil)
From ab9570f87217adb0f66794711d3115656c86a828 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 3 Dec 2014 16:44:20 -0800
Subject: [PATCH 092/165] Migrate filesystem driver to new storagedriver calls

The filesystem driver has been migrated to implement the storagedriver
interface changes. Most interestingly, this provides a filesystem-based
implementation of the Stat driver call.
With this comes some refactoring of reads and writes to be much simpler
and more robust. The IPC tests have been disabled due to stability
problems that we'll have to troubleshoot at a later date.
---
 storagedriver/filesystem/driver.go      | 185 ++++++++++++++----------
 storagedriver/filesystem/driver_test.go |  19 ++-
 2 files changed, 118 insertions(+), 86 deletions(-)

diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go
index 3fbfcdf6..6fb56891 100644
--- a/storagedriver/filesystem/driver.go
+++ b/storagedriver/filesystem/driver.go
@@ -1,10 +1,13 @@
 package filesystem
 
 import (
+	"bytes"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path"
+	"time"
 
 	"github.com/docker/docker-registry/storagedriver"
 	"github.com/docker/docker-registry/storagedriver/factory"
@@ -49,41 +52,43 @@ func New(rootDirectory string) *Driver {
 	return &Driver{rootDirectory}
 }
 
-// subPath returns the absolute path of a key within the Driver's storage
-func (d *Driver) subPath(subPath string) string {
-	return path.Join(d.rootDirectory, subPath)
-}
-
 // Implement the storagedriver.StorageDriver interface
 
 // GetContent retrieves the content stored at "path" as a []byte.
 func (d *Driver) GetContent(path string) ([]byte, error) {
-	contents, err := ioutil.ReadFile(d.subPath(path))
+	rc, err := d.ReadStream(path, 0)
 	if err != nil {
-		return nil, storagedriver.PathNotFoundError{Path: path}
+		return nil, err
 	}
-	return contents, nil
+	defer rc.Close()
+
+	p, err := ioutil.ReadAll(rc)
+	if err != nil {
+		return nil, err
+	}
+
+	return p, nil
 }
 
 // PutContent stores the []byte content at a location designated by "path".
 func (d *Driver) PutContent(subPath string, contents []byte) error {
-	fullPath := d.subPath(subPath)
-	parentDir := path.Dir(fullPath)
-	err := os.MkdirAll(parentDir, 0755)
-	if err != nil {
+	if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil {
 		return err
 	}
 
-	err = ioutil.WriteFile(fullPath, contents, 0644)
-	return err
+	return os.Truncate(d.fullPath(subPath), int64(len(contents)))
 }
 
 // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
 // given byte offset.
 func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
-	file, err := os.OpenFile(d.subPath(path), os.O_RDONLY, 0644)
+	file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
 	if err != nil {
-		return nil, storagedriver.PathNotFoundError{Path: path}
+		if os.IsNotExist(err) {
+			return nil, storagedriver.PathNotFoundError{Path: path}
+		}
+
+		return nil, err
 	}
 
 	seekPos, err := file.Seek(int64(offset), os.SEEK_SET)
@@ -98,81 +103,64 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
 	return file, nil
 }
 
-// WriteStream stores the contents of the provided io.ReadCloser at a location
+// WriteStream stores the contents of the provided io.Reader at a location
 // designated by the given path.
-func (d *Driver) WriteStream(subPath string, offset, size int64, reader io.ReadCloser) error {
-	defer reader.Close()
-
-	resumableOffset, err := d.CurrentSize(subPath)
-	if _, pathNotFound := err.(storagedriver.PathNotFoundError); err != nil && !pathNotFound {
-		return err
+func (d *Driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) {
+	if offset < 0 {
+		return 0, storagedriver.InvalidOffsetError{Path: subPath, Offset: offset}
 	}
 
-	if offset > int64(resumableOffset) {
-		return storagedriver.InvalidOffsetError{Path: subPath, Offset: offset}
-	}
+	// TODO(stevvooe): This needs to be a requirement.
+ // if !path.IsAbs(subPath) { + // return fmt.Errorf("absolute path required: %q", subPath) + // } - fullPath := d.subPath(subPath) + fullPath := d.fullPath(subPath) parentDir := path.Dir(fullPath) - err = os.MkdirAll(parentDir, 0755) + if err := os.MkdirAll(parentDir, 0755); err != nil { + return 0, err + } + + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { - return err - } - - var file *os.File - if offset == 0 { - file, err = os.Create(fullPath) - } else { - file, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_APPEND, 0) + // TODO(stevvooe): A few missing conditions in storage driver: + // 1. What if the path is already a directory? + // 2. Should number 1 be exposed explicitly in storagedriver? + // 2. Can this path not exist, even if we create above? + return 0, err } + defer fp.Close() + nn, err = fp.Seek(offset, os.SEEK_SET) if err != nil { - return err + return 0, err } - defer file.Close() - // TODO(sday): Use Seek + Copy here. - - buf := make([]byte, 32*1024) - for { - bytesRead, er := reader.Read(buf) - if bytesRead > 0 { - bytesWritten, ew := file.WriteAt(buf[0:bytesRead], int64(offset)) - if bytesWritten > 0 { - offset += int64(bytesWritten) - } - if ew != nil { - err = ew - break - } - if bytesRead != bytesWritten { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } + if nn != offset { + return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) } - return err + + return io.Copy(fp, reader) } -// CurrentSize retrieves the curernt size in bytes of the object at the given -// path. -func (d *Driver) CurrentSize(subPath string) (uint64, error) { - fullPath := d.subPath(subPath) +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *Driver) Stat(subPath string) (storagedriver.FileInfo, error) { + fullPath := d.fullPath(subPath) - fileInfo, err := os.Stat(fullPath) - if err != nil && !os.IsNotExist(err) { - return 0, err - } else if err != nil { - return 0, storagedriver.PathNotFoundError{Path: subPath} + fi, err := os.Stat(fullPath) + if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } + + return nil, err } - return uint64(fileInfo.Size()), nil + + return fileInfo{ + path: subPath, + FileInfo: fi, + }, nil } // List returns a list of the objects that are direct descendants of the given @@ -181,7 +169,7 @@ func (d *Driver) List(subPath string) ([]string, error) { if subPath[len(subPath)-1] != '/' { subPath += "/" } - fullPath := d.subPath(subPath) + fullPath := d.fullPath(subPath) dir, err := os.Open(fullPath) if err != nil { @@ -204,8 +192,8 @@ func (d *Driver) List(subPath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *Driver) Move(sourcePath string, destPath string) error { - source := d.subPath(sourcePath) - dest := d.subPath(destPath) + source := d.fullPath(sourcePath) + dest := d.fullPath(destPath) if _, err := os.Stat(source); os.IsNotExist(err) { return storagedriver.PathNotFoundError{Path: sourcePath} @@ -217,7 +205,7 @@ func (d *Driver) Move(sourcePath string, destPath string) error { // Delete recursively deletes all objects stored at "path" and its subpaths. 
func (d *Driver) Delete(subPath string) error { - fullPath := d.subPath(subPath) + fullPath := d.fullPath(subPath) _, err := os.Stat(fullPath) if err != nil && !os.IsNotExist(err) { @@ -229,3 +217,42 @@ func (d *Driver) Delete(subPath string) error { err = os.RemoveAll(fullPath) return err } + +// fullPath returns the absolute path of a key within the Driver's storage. +func (d *Driver) fullPath(subPath string) string { + return path.Join(d.rootDirectory, subPath) +} + +type fileInfo struct { + os.FileInfo + path string +} + +var _ storagedriver.FileInfo = fileInfo{} + +// Path provides the full path of the target of this file info. +func (fi fileInfo) Path() string { + return fi.path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (fi fileInfo) Size() int64 { + if fi.IsDir() { + return 0 + } + + return fi.FileInfo.Size() +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (fi fileInfo) ModTime() time.Time { + return fi.FileInfo.ModTime() +} + +// IsDir returns true if the path is a directory. +func (fi fileInfo) IsDir() bool { + return fi.FileInfo.IsDir() +} diff --git a/storagedriver/filesystem/driver_test.go b/storagedriver/filesystem/driver_test.go index 1d9bac54..0965daa4 100644 --- a/storagedriver/filesystem/driver_test.go +++ b/storagedriver/filesystem/driver_test.go @@ -1,6 +1,7 @@ package filesystem import ( + "io/ioutil" "os" "testing" @@ -13,12 +14,16 @@ import ( func Test(t *testing.T) { TestingT(t) } func init() { - rootDirectory := "/tmp/driver" - os.RemoveAll(rootDirectory) - - filesystemDriverConstructor := func() (storagedriver.StorageDriver, error) { - return New(rootDirectory), nil + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) } - testsuites.RegisterInProcessSuite(filesystemDriverConstructor, testsuites.NeverSkip) - testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": rootDirectory}, testsuites.NeverSkip) + defer os.Remove(root) + + testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) { + return New(root), nil + }, testsuites.NeverSkip) + + // BUG(stevvooe): IPC is broken so we're disabling for now. Will revisit later. + // testsuites.RegisterIPCSuite(driverName, map[string]string{"rootdirectory": root}, testsuites.NeverSkip) } From 2ebc373d917ecc12cecaf566e0a90a3de0e849f3 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 4 Dec 2014 20:14:41 -0800 Subject: [PATCH 093/165] Refactor inmemory driver for Stat and WriteStream methods This change started out as simply updating the existing inmemory driver to implement the new Stat call. After struggling with the map based implementation, it has been refactored to be a tree-based implementation. This process has exposed a few missing error cases in the StorageDriver API that should be addressed in the coming weeks. 
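To make the message above concrete, here is a condensed sketch of the kind
of path tree it describes. The names (node, find) are illustrative only;
the real types live in the new mfs.go, which separates dir and file nodes
and tracks modification times.

	package example

	import "strings"

	// node stands in for the tree entries: a directory holds children by
	// name, a file holds its contents.
	type node struct {
		isDir    bool
		data     []byte           // file contents when isDir is false
		children map[string]*node // child entries when isDir is true
	}

	// find walks the tree one path component at a time and returns the
	// deepest existing node along p.
	func (n *node) find(p string) *node {
		cur := n
		for _, part := range strings.Split(strings.Trim(p, "/"), "/") {
			if part == "" || !cur.isDir {
				break
			}
			next, ok := cur.children[part]
			if !ok {
				break
			}
			cur = next
		}
		return cur
	}

This deepest-match behavior is why the driver code below compares
found.path() against the normalized request path before deciding whether
the entry exists.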
--- storagedriver/inmemory/driver.go | 199 +++++++++------- storagedriver/inmemory/driver_test.go | 5 +- storagedriver/inmemory/mfs.go | 329 ++++++++++++++++++++++++++ 3 files changed, 453 insertions(+), 80 deletions(-) create mode 100644 storagedriver/inmemory/mfs.go diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go index 3231b017..b6bdc258 100644 --- a/storagedriver/inmemory/driver.go +++ b/storagedriver/inmemory/driver.go @@ -5,9 +5,9 @@ import ( "fmt" "io" "io/ioutil" - "regexp" "strings" "sync" + "time" "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/factory" @@ -29,13 +29,18 @@ func (factory *inMemoryDriverFactory) Create(parameters map[string]string) (stor // Driver is a storagedriver.StorageDriver implementation backed by a local map. // Intended solely for example and testing purposes. type Driver struct { - storage map[string][]byte - mutex sync.RWMutex + root *dir + mutex sync.RWMutex } // New constructs a new Driver. func New() *Driver { - return &Driver{storage: make(map[string][]byte)} + return &Driver{root: &dir{ + common: common{ + p: "/", + mod: time.Now(), + }, + }} } // Implement the storagedriver.StorageDriver interface. @@ -44,18 +49,31 @@ func New() *Driver { func (d *Driver) GetContent(path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() - contents, ok := d.storage[path] - if !ok { - return nil, storagedriver.PathNotFoundError{Path: path} + + rc, err := d.ReadStream(path, 0) + if err != nil { + return nil, err } - return contents, nil + defer rc.Close() + + return ioutil.ReadAll(rc) } // PutContent stores the []byte content at a location designated by "path". -func (d *Driver) PutContent(path string, contents []byte) error { +func (d *Driver) PutContent(p string, contents []byte) error { d.mutex.Lock() defer d.mutex.Unlock() - d.storage[path] = contents + + f, err := d.root.mkfile(p) + if err != nil { + // TODO(stevvooe): Again, we need to clarify when this is not a + // directory in StorageDriver API. + return fmt.Errorf("not a file") + } + + f.truncate() + f.WriteAt(contents, 0) + return nil } @@ -64,86 +82,104 @@ func (d *Driver) PutContent(path string, contents []byte) error { func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() - contents, err := d.GetContent(path) - if err != nil { - return nil, err - } else if len(contents) <= int(offset) { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + + path = d.normalize(path) + found := d.root.find(path) + + if found.path() != path { + return nil, storagedriver.PathNotFoundError{Path: path} } - src := contents[offset:] - buf := make([]byte, len(src)) - copy(buf, src) - return ioutil.NopCloser(bytes.NewReader(buf)), nil + if found.isdir() { + return nil, fmt.Errorf("%q is a directory", path) + } + + return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil } // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. 
-func (d *Driver) WriteStream(path string, offset, size int64, reader io.ReadCloser) error { - defer reader.Close() - d.mutex.RLock() - defer d.mutex.RUnlock() +func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { + d.mutex.Lock() + defer d.mutex.Unlock() - resumableOffset, err := d.CurrentSize(path) + if offset < 0 { + return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + + normalized := d.normalize(path) + + f, err := d.root.mkfile(normalized) if err != nil { - return err + return 0, fmt.Errorf("not a file") } - if offset > int64(resumableOffset) { - return storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } + var buf bytes.Buffer - contents, err := ioutil.ReadAll(reader) + nn, err = buf.ReadFrom(reader) if err != nil { - return err + // TODO(stevvooe): This condition is odd and we may need to clarify: + // we've read nn bytes from reader but have written nothing to the + // backend. What is the correct return value? Really, the caller needs + // to know that the reader has been advanced and reattempting the + // operation is incorrect. + return nn, err } - if offset > 0 { - contents = append(d.storage[path][0:offset], contents...) - } - - d.storage[path] = contents - return nil + f.WriteAt(buf.Bytes(), offset) + return nn, err } -// CurrentSize retrieves the curernt size in bytes of the object at the given -// path. -func (d *Driver) CurrentSize(path string) (uint64, error) { +// Stat returns info about the provided path. +func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) { d.mutex.RLock() defer d.mutex.RUnlock() - contents, ok := d.storage[path] - if !ok { - return 0, nil + + normalized := d.normalize(path) + found := d.root.find(path) + + if found.path() != normalized { + return nil, storagedriver.PathNotFoundError{Path: path} } - return uint64(len(contents)), nil + + fi := storagedriver.FileInfoFields{ + Path: path, + IsDir: found.isdir(), + ModTime: found.modtime(), + } + + if !fi.IsDir { + fi.Size = int64(len(found.(*file).data)) + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the given // path. func (d *Driver) List(path string) ([]string, error) { - if path[len(path)-1] != '/' { - path += "/" - } - subPathMatcher, err := regexp.Compile(fmt.Sprintf("^%s[^/]+", path)) - if err != nil { - return nil, err + normalized := d.normalize(path) + + found := d.root.find(normalized) + + if !found.isdir() { + return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... 
 }

- d.mutex.RLock()
- defer d.mutex.RUnlock()
- // we use map to collect unique keys
- keySet := make(map[string]struct{})
- for k := range d.storage {
- if key := subPathMatcher.FindString(k); key != "" {
- keySet[key] = struct{}{}
+ entries, err := found.(*dir).list(normalized)
+
+ if err != nil {
+ switch err {
+ case errNotExists:
+ return nil, storagedriver.PathNotFoundError{Path: path}
+ case errIsNotDir:
+ return nil, fmt.Errorf("not a directory")
+ default:
+ return nil, err
 }
 }

- keys := make([]string, 0, len(keySet))
- for k := range keySet {
- keys = append(keys, k)
- }
- return keys, nil
+ return entries, nil
 }

 // Move moves an object stored at sourcePath to destPath, removing the original
@@ -151,32 +187,37 @@ func (d *Driver) List(path string) ([]string, error) {
 func (d *Driver) Move(sourcePath string, destPath string) error {
 d.mutex.Lock()
 defer d.mutex.Unlock()
- contents, ok := d.storage[sourcePath]
- if !ok {
- return storagedriver.PathNotFoundError{Path: sourcePath}
+
+ normalizedSrc, normalizedDst := d.normalize(sourcePath), d.normalize(destPath)
+
+ err := d.root.move(normalizedSrc, normalizedDst)
+ switch err {
+ case errNotExists:
+ return storagedriver.PathNotFoundError{Path: sourcePath}
+ default:
+ return err
 }
- d.storage[destPath] = contents
- delete(d.storage, sourcePath)
- return nil
 }

 // Delete recursively deletes all objects stored at "path" and its subpaths.
 func (d *Driver) Delete(path string) error {
 d.mutex.Lock()
 defer d.mutex.Unlock()
- var subPaths []string
- for k := range d.storage {
- if strings.HasPrefix(k, path) {
- subPaths = append(subPaths, k)
- }
- }

- if len(subPaths) == 0 {
+ normalized := d.normalize(path)
+
+ err := d.root.delete(normalized)
+ switch err {
+ case errNotExists:
 return storagedriver.PathNotFoundError{Path: path}
+ default:
+ return err
 }
-
- for _, subPath := range subPaths {
- delete(d.storage, subPath)
- }
- return nil
+}
+
+func (d *Driver) normalize(p string) string {
+ if !strings.HasPrefix(p, "/") {
+ p = "/" + p // Naive path absolution.
+ }
+ return p
 }
diff --git a/storagedriver/inmemory/driver_test.go b/storagedriver/inmemory/driver_test.go
index 87549542..6a4b3697 100644
--- a/storagedriver/inmemory/driver_test.go
+++ b/storagedriver/inmemory/driver_test.go
@@ -17,5 +17,8 @@ func init() {
 return New(), nil
 }
 testsuites.RegisterInProcessSuite(inmemoryDriverConstructor, testsuites.NeverSkip)
- testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip)
+
+ // BUG(stevvooe): Disable flaky IPC tests for now until we can troubleshoot
+ // the problems with libchan.
+ // testsuites.RegisterIPCSuite(driverName, nil, testsuites.NeverSkip)
 }
diff --git a/storagedriver/inmemory/mfs.go b/storagedriver/inmemory/mfs.go
new file mode 100644
index 00000000..5248bbc6
--- /dev/null
+++ b/storagedriver/inmemory/mfs.go
@@ -0,0 +1,329 @@
+package inmemory
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "sort"
+ "strings"
+ "time"
+)
+
+var (
+ errExists = fmt.Errorf("exists")
+ errNotExists = fmt.Errorf("not exists")
+ errIsNotDir = fmt.Errorf("notdir")
+ errIsDir = fmt.Errorf("isdir")
+)
+
+type node interface {
+ name() string
+ path() string
+ isdir() bool
+ modtime() time.Time
+}
+
+// dir is the central type for the memory-based storagedriver. All operations
+// are dispatched from a root dir.
+type dir struct {
+ common
+
+ // TODO(stevvooe): Use sorted slice + search.
+ children map[string]node
+}
+
+var _ node = &dir{}
+
+func (d *dir) isdir() bool {
+ return true
+}
+
+// add places the node n into dir d.
+func (d *dir) add(n node) {
+ if d.children == nil {
+ d.children = make(map[string]node)
+ }
+
+ d.children[n.name()] = n
+ d.mod = time.Now()
+}
+
+// find searches for the node, given path q in dir. If the node is found, it
+// will be returned. If the node is not found, the closest existing parent is
+// returned. If the node is found, the returned (node).path() will match q.
+func (d *dir) find(q string) node {
+ q = strings.Trim(q, "/")
+ i := strings.Index(q, "/")
+
+ if q == "" {
+ return d
+ }
+
+ if i == 0 {
+ panic("shouldn't happen, no root paths")
+ }
+
+ var component string
+ if i < 0 {
+ // No more path components
+ component = q
+ } else {
+ component = q[:i]
+ }
+
+ child, ok := d.children[component]
+ if !ok {
+ // Node was not found. Return the current node, the closest existing parent.
+ return d
+ }
+
+ if child.isdir() {
+ // traverse down!
+ q = q[i+1:]
+ return child.(*dir).find(q)
+ }
+
+ return child
+}
+
+func (d *dir) list(p string) ([]string, error) {
+ n := d.find(p)
+
+ if n.path() != p {
+ return nil, errNotExists
+ }
+
+ if !n.isdir() {
+ return nil, errIsNotDir
+ }
+
+ var children []string
+ for _, child := range n.(*dir).children {
+ children = append(children, child.path())
+ }
+
+ sort.Strings(children)
+ return children, nil
+}
+
+// mkfile creates the file at path p or returns the existing one. It returns
+// an error if the path exists and is a directory. Essentially, this is open
+// or create.
+func (d *dir) mkfile(p string) (*file, error) {
+ n := d.find(p)
+ if n.path() == p {
+ if n.isdir() {
+ return nil, errIsDir
+ }
+
+ return n.(*file), nil
+ }
+
+ dirpath, filename := path.Split(p)
+ // Make any non-existent directories
+ n, err := d.mkdirs(dirpath)
+ if err != nil {
+ return nil, err
+ }
+
+ dd := n.(*dir)
+ n = &file{
+ common: common{
+ p: path.Join(dd.path(), filename),
+ mod: time.Now(),
+ },
+ }
+
+ dd.add(n)
+ return n.(*file), nil
+}
+
+// mkdirs creates any missing directory entries in p and returns the result.
+func (d *dir) mkdirs(p string) (*dir, error) {
+ if p == "" {
+ p = "/"
+ }
+
+ n := d.find(p)
+
+ if !n.isdir() {
+ // Found something there
+ return nil, errIsNotDir
+ }
+
+ if n.path() == p {
+ return n.(*dir), nil
+ }
+
+ dd := n.(*dir)
+
+ relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/")
+
+ if relative == "" {
+ return dd, nil
+ }
+
+ components := strings.Split(relative, "/")
+ for _, component := range components {
+ d, err := dd.mkdir(component)
+
+ if err != nil {
+ // This should actually never happen, since there are no children.
+ return nil, err
+ }
+ dd = d
+ }
+
+ return dd, nil
+}
+
+// mkdir creates a child directory under d with the given name.
+func (d *dir) mkdir(name string) (*dir, error) { + if name == "" { + return nil, fmt.Errorf("invalid dirname") + } + + _, ok := d.children[name] + if ok { + return nil, errExists + } + + child := &dir{ + common: common{ + p: path.Join(d.path(), name), + mod: time.Now(), + }, + } + d.add(child) + d.mod = time.Now() + + return child, nil +} + +func (d *dir) move(src, dst string) error { + dstDirname, _ := path.Split(dst) + + dp, err := d.mkdirs(dstDirname) + if err != nil { + return err + } + + srcDirname, srcFilename := path.Split(src) + sp := d.find(srcDirname) + + if sp.path() != srcDirname { + return errNotExists + } + + s, ok := sp.(*dir).children[srcFilename] + if !ok { + return errNotExists + } + + delete(sp.(*dir).children, srcFilename) + + switch n := s.(type) { + case *dir: + n.p = dst + case *file: + n.p = dst + } + + dp.add(s) + + return nil +} + +func (d *dir) delete(p string) error { + dirname, filename := path.Split(p) + parent := d.find(dirname) + + if dirname != parent.path() { + return errNotExists + } + + if _, ok := parent.(*dir).children[filename]; !ok { + return errNotExists + } + + delete(parent.(*dir).children, filename) + return nil +} + +// dump outputs a primitive directory structure to stdout. +func (d *dir) dump(indent string) { + fmt.Println(indent, d.name()+"/") + + for _, child := range d.children { + if child.isdir() { + child.(*dir).dump(indent + "\t") + } else { + fmt.Println(indent, child.name()) + } + + } +} + +func (d *dir) String() string { + return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) +} + +// file stores actual data in the fs tree. It acts like an open, seekable file +// where operations are conducted through ReadAt and WriteAt. Use it with +// SectionReader for the best effect. +type file struct { + common + data []byte +} + +var _ node = &file{} + +func (f *file) isdir() bool { + return false +} + +func (f *file) truncate() { + f.data = f.data[:0] +} + +func (f *file) sectionReader(offset int64) io.Reader { + return io.NewSectionReader(f, offset, int64(len(f.data))-offset) +} + +func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { + return copy(p, f.data[offset:]), nil +} + +func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { + if len(f.data) > 0 && offset >= int64(len(f.data)) { + // Extend missing region with a zero pad, while also preallocating out to size of p. + pad := offset - int64(len(f.data)) + size := len(p) + int(pad) + f.data = append(f.data, make([]byte, pad, size)...) + } + + f.data = append(f.data, p...) + return len(p), nil +} + +func (f *file) String() string { + return fmt.Sprintf("&file{path: %q}", f.p) +} + +// common provides shared fields and methods for node implementations. +type common struct { + p string + mod time.Time +} + +func (c *common) name() string { + _, name := path.Split(c.p) + return name +} + +func (c *common) path() string { + return c.p +} + +func (c *common) modtime() time.Time { + return c.mod +} From 70ab06b8640416021696d39990fc583098c70610 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 4 Dec 2014 20:55:59 -0800 Subject: [PATCH 094/165] Update storage package to use StorageDriver.Stat This change updates the backend storage package that consumes StorageDriver to use the new Stat call, over CurrentSize. It also makes minor updates for using WriteStream and ReadStream. 
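The consumer-side pattern is small enough to sketch. The helper below is
illustrative and not part of the patch; it assumes only the StorageDriver
interface shown in the diffs that follow:

    package storage

    import (
        "fmt"
        "time"

        "github.com/docker/docker-registry/storagedriver"
    )

    // statFile mirrors the newFileReader change below: a single Stat call
    // replaces the old CurrentSize query and also yields the modification
    // time that layerReader now uses for CreatedAt.
    func statFile(driver storagedriver.StorageDriver, path string) (int64, time.Time, error) {
        fi, err := driver.Stat(path)
        if err != nil {
            return 0, time.Time{}, err
        }
        if fi.IsDir() {
            return 0, time.Time{}, fmt.Errorf("cannot read a directory: %s", path)
        }
        return fi.Size(), fi.ModTime(), nil
    }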
--- storage/filereader.go | 21 ++++++++++++++------- storage/layerreader.go | 7 +++---- storage/layerstore.go | 7 ------- storage/layerupload.go | 27 ++++++++++++++------------- storage/manifeststore.go | 8 ++++++-- 5 files changed, 37 insertions(+), 33 deletions(-) diff --git a/storage/filereader.go b/storage/filereader.go index 8f1f5205..bcc2614e 100644 --- a/storage/filereader.go +++ b/storage/filereader.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "time" "github.com/docker/docker-registry/storagedriver" ) @@ -16,8 +17,9 @@ type fileReader struct { driver storagedriver.StorageDriver // identifying fields - path string - size int64 // size is the total layer size, must be set. + path string + size int64 // size is the total layer size, must be set. + modtime time.Time // mutable fields rc io.ReadCloser // remote read closer @@ -28,16 +30,21 @@ type fileReader struct { func newFileReader(driver storagedriver.StorageDriver, path string) (*fileReader, error) { // Grab the size of the layer file, ensuring existence. - size, err := driver.CurrentSize(path) + fi, err := driver.Stat(path) if err != nil { return nil, err } + if fi.IsDir() { + return nil, fmt.Errorf("cannot read a directory") + } + return &fileReader{ - driver: driver, - path: path, - size: int64(size), + driver: driver, + path: path, + size: fi.Size(), + modtime: fi.ModTime(), }, nil } @@ -126,7 +133,7 @@ func (fr *fileReader) reader() (io.Reader, error) { } // If we don't have a reader, open one up. - rc, err := fr.driver.ReadStream(fr.path, uint64(fr.offset)) + rc, err := fr.driver.ReadStream(fr.path, fr.offset) if err != nil { return nil, err diff --git a/storage/layerreader.go b/storage/layerreader.go index 2cc184fd..fa2275d9 100644 --- a/storage/layerreader.go +++ b/storage/layerreader.go @@ -11,9 +11,8 @@ import ( type layerReader struct { fileReader - name string // repo name of this layer - digest digest.Digest - createdAt time.Time + name string // repo name of this layer + digest digest.Digest } var _ Layer = &layerReader{} @@ -27,5 +26,5 @@ func (lrs *layerReader) Digest() digest.Digest { } func (lrs *layerReader) CreatedAt() time.Time { - return lrs.createdAt + return lrs.modtime } diff --git a/storage/layerstore.go b/storage/layerstore.go index d731a5b8..ddebdbcc 100644 --- a/storage/layerstore.go +++ b/storage/layerstore.go @@ -1,8 +1,6 @@ package storage import ( - "time" - "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storagedriver" ) @@ -55,11 +53,6 @@ func (ls *layerStore) Fetch(name string, digest digest.Digest) (Layer, error) { fileReader: *fr, name: name, digest: digest, - - // TODO(stevvooe): Storage backend does not support modification time - // queries yet. Layers "never" change, so just return the zero value - // plus a nano-second. - createdAt: (time.Time{}).Add(time.Nanosecond), }, nil } diff --git a/storage/layerupload.go b/storage/layerupload.go index de1a894b..3ee593b9 100644 --- a/storage/layerupload.go +++ b/storage/layerupload.go @@ -107,9 +107,13 @@ func (luc *layerUploadController) Finish(size int64, digest digest.Digest) (Laye return nil, err } - if err := luc.writeLayer(fp, size, digest); err != nil { + if nn, err := luc.writeLayer(fp, digest); err != nil { // Cleanup? return nil, err + } else if nn != size { + // TODO(stevvooe): Short write. Will have to delete the location and + // report an error. This error needs to be reported to the client. + return nil, fmt.Errorf("short write writing layer") } // Yes! We have written some layer data. 
Let's make it visible. Link the
@@ -281,19 +285,20 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d
 return dgst, nil
 }

-// writeLayer actually writes the the layer file into its final destination.
-// The layer should be validated before commencing the write.
-func (luc *layerUploadController) writeLayer(fp layerFile, size int64, dgst digest.Digest) error {
+// writeLayer actually writes the layer file into its final destination,
+// identified by dgst. The layer should be validated before commencing the
+// write.
+func (luc *layerUploadController) writeLayer(fp layerFile, dgst digest.Digest) (nn int64, err error) {
 blobPath, err := luc.layerStore.pathMapper.path(blobPathSpec{
 digest: dgst,
 })

 if err != nil {
- return err
+ return 0, err
 }

 // Check for existence
- if _, err := luc.layerStore.driver.CurrentSize(blobPath); err != nil {
+ if _, err := luc.layerStore.driver.Stat(blobPath); err != nil {
 // TODO(stevvooe): This check is kind of problematic and very racy.
 switch err := err.(type) {
 case storagedriver.PathNotFoundError:
@@ -303,22 +308,18 @@ func (luc *layerUploadController) writeLayer(fp layerFile, size int64, dgst dige
 // content addressable and we should just use this to ensure we
 // have it written. Although, we do need to verify that the
 // content that is there is the correct length.
- return err
+ return 0, err
 }
 }

 // Seek our local layer file back now.
 if _, err := fp.Seek(0, os.SEEK_SET); err != nil {
 // Cleanup?
- return err
+ return 0, err
 }

 // Okay: we can write the file to the blob store.
- if err := luc.layerStore.driver.WriteStream(blobPath, 0, uint64(size), fp); err != nil {
- return err
- }
-
- return nil
+ return luc.layerStore.driver.WriteStream(blobPath, 0, fp)
 }

 // linkLayer links a valid, written layer blob into the registry under the
diff --git a/storage/manifeststore.go b/storage/manifeststore.go
index e1760dd8..a6bdf3b3 100644
--- a/storage/manifeststore.go
+++ b/storage/manifeststore.go
@@ -22,12 +22,16 @@ func (ms *manifestStore) Exists(name, tag string) (bool, error) {
 return false, err
 }

- size, err := ms.driver.CurrentSize(p)
+ fi, err := ms.driver.Stat(p)
 if err != nil {
 return false, err
 }

- if size == 0 {
+ if fi.IsDir() {
+ return false, fmt.Errorf("unexpected directory at path: %v, name=%s tag=%s", p, name, tag)
+ }
+
+ if fi.Size() == 0 {
 return false, nil
 }

From d703a86a642d605d516ffe635cd0888c80dd1ce0 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 5 Dec 2014 11:46:41 -0800
Subject: [PATCH 095/165] Add checks for ReadStream offset boundary conditions

Several checks for ReadStream with offsets around boundary conditions were
missing. The new checks ensure negative offsets are detected and io.EOF is
returned properly when trying to read past the end of a file. The filesystem
and inmemory drivers have been updated accordingly.

An outline of missing checks for List is also part of this commit. Action
will be taken here based on discussion in issue #819.
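A hedged sketch of the contract these tests pin down, run against the inmemory
driver; the sample is illustrative and not part of the patch:

    package main

    import (
        "fmt"
        "io"

        "github.com/docker/docker-registry/storagedriver"
        "github.com/docker/docker-registry/storagedriver/inmemory"
    )

    func main() {
        d := inmemory.New()
        if err := d.PutContent("/blob", []byte("abc")); err != nil {
            panic(err)
        }

        // Negative offsets are rejected up front with a typed error.
        if _, err := d.ReadStream("/blob", -1); err != nil {
            fmt.Println(err.(storagedriver.InvalidOffsetError).Offset) // -1
        }

        // Reading at or past the end succeeds, but the reader yields zero
        // bytes and io.EOF rather than an error from ReadStream itself.
        rc, err := d.ReadStream("/blob", 3)
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        n, err := rc.Read(make([]byte, 8))
        fmt.Println(n, err == io.EOF) // 0 true
    }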
--- storagedriver/filesystem/driver.go | 4 +++
 storagedriver/inmemory/driver.go | 4 +++
 storagedriver/testsuites/testsuites.go | 43 ++++++++++++++++++++++++++
 3 files changed, 51 insertions(+)

diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go
index 6fb56891..05ec6175 100644
--- a/storagedriver/filesystem/driver.go
+++ b/storagedriver/filesystem/driver.go
@@ -82,6 +82,10 @@ func (d *Driver) PutContent(subPath string, contents []byte) error {
 // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
 // given byte offset.
 func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
+ if offset < 0 {
+ return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
+ }
+
 file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
 if err != nil {
 if os.IsNotExist(err) {
diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go
index b6bdc258..0b68e021 100644
--- a/storagedriver/inmemory/driver.go
+++ b/storagedriver/inmemory/driver.go
@@ -83,6 +83,10 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) {
 d.mutex.RLock()
 defer d.mutex.RUnlock()

+ if offset < 0 {
+ return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
+ }
+
 path = d.normalize(path)
 found := d.root.find(path)

diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go
index 6a51cd19..0967e2db 100644
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -2,6 +2,7 @@ package testsuites

 import (
 "bytes"
+ "io"
 "io/ioutil"
 "math/rand"
 "os"
@@ -209,6 +210,43 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) {
 readContents, err = ioutil.ReadAll(reader)
 c.Assert(err, check.IsNil)
 c.Assert(readContents, check.DeepEquals, contentsChunk3)
+
+ // Ensure we get an invalid offset error for negative offsets.
+ reader, err = suite.StorageDriver.ReadStream(filename, -1)
+ c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
+ c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))
+ c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename)
+ c.Assert(reader, check.IsNil)
+
+ // Read past the end of the content and make sure we get a reader that
+ // returns 0 bytes and io.EOF
+ reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3)
+ c.Assert(err, check.IsNil)
+ defer reader.Close()
+
+ buf := make([]byte, chunkSize)
+ n, err := reader.Read(buf)
+ c.Assert(err, check.Equals, io.EOF)
+ c.Assert(n, check.Equals, 0)
+
+ // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF.
+ reader, err = suite.StorageDriver.ReadStream(filename, chunkSize*3-1)
+ c.Assert(err, check.IsNil)
+ defer reader.Close()
+
+ n, err = reader.Read(buf)
+ c.Assert(n, check.Equals, 1)
+
+ // We don't care whether the io.EOF comes on this read or the first
+ // zero read, but the only error acceptable here is io.EOF.
+ if err != nil {
+ c.Assert(err, check.Equals, io.EOF)
+ }
+
+ // Any more reads should result in zero bytes and io.EOF
+ n, err = reader.Read(buf)
+ c.Assert(n, check.Equals, 0)
+ c.Assert(err, check.Equals, io.EOF)
 }

 // TestContinueStreamAppend tests that a stream write can be appended to without
@@ -329,6 +367,11 @@ func (suite *DriverSuite) TestList(c *check.C) {
 sort.Strings(keys)
 c.Assert(keys, check.DeepEquals, childFiles)
+
+ // A few checks to add here (check out #819 for more discussion on this):
+ // 1. Ensure that all paths are absolute.
+ // 2. Ensure that listings only include direct children.
+ // 3. Ensure that we only respond to directory listings that end with a slash (maybe?).
 }

 // TestMove checks that a moved object no longer exists at the source path and

From 8cb0e3398c35d147ac73f863fb2dbd96159999d6 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 5 Dec 2014 14:05:37 -0800
Subject: [PATCH 096/165] Disable s3, azure and ipc packages and testing

The packages causing build errors are being disabled for now to let us split
up the work in the different driver implementations without blocking
integration into the main branch. The s3 and azure implementations need some
effort to add Stat support. The ipc package needs that work plus some care
around hanging send calls.

---
 storagedriver/azure/azure.go | 2 +
 storagedriver/azure/azure_test.go | 2 +
 storagedriver/factory/factory.go | 26 +++++++-----
 storagedriver/ipc/client.go | 2 +
 storagedriver/ipc/ipc.go | 2 +
 storagedriver/ipc/server.go | 2 +
 storagedriver/s3/s3.go | 2 +
 storagedriver/s3/s3_test.go | 2 +
 storagedriver/testsuites/testsuites.go | 56 ++++++++++++++------------
 9 files changed, 60 insertions(+), 36 deletions(-)

diff --git a/storagedriver/azure/azure.go b/storagedriver/azure/azure.go
index 489a6348..64402f3b 100644
--- a/storagedriver/azure/azure.go
+++ b/storagedriver/azure/azure.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 // Package azure provides a storagedriver.StorageDriver implementation to
 // store blobs in Microsoft Azure Blob Storage Service.
 package azure
diff --git a/storagedriver/azure/azure_test.go b/storagedriver/azure/azure_test.go
index 888d1165..1edcc1ea 100644
--- a/storagedriver/azure/azure_test.go
+++ b/storagedriver/azure/azure_test.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 package azure

 import (
diff --git a/storagedriver/factory/factory.go b/storagedriver/factory/factory.go
index 0b85f372..0f8ca001 100644
--- a/storagedriver/factory/factory.go
+++ b/storagedriver/factory/factory.go
@@ -4,7 +4,6 @@ import (
 "fmt"

 "github.com/docker/docker-registry/storagedriver"
- "github.com/docker/docker-registry/storagedriver/ipc"
 )

 // driverFactories stores an internal mapping between storage driver names and their respective
@@ -41,16 +40,23 @@ func Register(name string, factory StorageDriverFactory) {
 func Create(name string, parameters map[string]string) (storagedriver.StorageDriver, error) {
 driverFactory, ok := driverFactories[name]
 if !ok {
+ return nil, InvalidStorageDriverError{name}
+
+ // NOTE(stevvooe): We are disabling storagedriver ipc for now, as the
+ // server and client need to be updated for the changed API calls and
+ // there were some problems with libchan hanging. We'll phase this
+ // functionality back in over the next few weeks.
+
+ // No registered StorageDriverFactory found, try ipc
- driverClient, err := ipc.NewDriverClient(name, parameters)
- if err != nil {
- return nil, InvalidStorageDriverError{name}
- }
- err = driverClient.Start()
- if err != nil {
- return nil, err
- }
- return driverClient, nil
+ // driverClient, err := ipc.NewDriverClient(name, parameters)
+ // if err != nil {
+ // return nil, InvalidStorageDriverError{name}
+ // }
+ // err = driverClient.Start()
+ // if err != nil {
+ // return nil, err
+ // }
+ // return driverClient, nil
 }
 return driverFactory.Create(parameters)
 }
diff --git a/storagedriver/ipc/client.go b/storagedriver/ipc/client.go
index 7e52a084..2dc5c44e 100644
--- a/storagedriver/ipc/client.go
+++ b/storagedriver/ipc/client.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 package ipc

 import (
diff --git a/storagedriver/ipc/ipc.go b/storagedriver/ipc/ipc.go
index 82bdcbd7..45c54659 100644
--- a/storagedriver/ipc/ipc.go
+++ b/storagedriver/ipc/ipc.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 package ipc

 import (
diff --git a/storagedriver/ipc/server.go b/storagedriver/ipc/server.go
index 1c0084f9..fa0077a8 100644
--- a/storagedriver/ipc/server.go
+++ b/storagedriver/ipc/server.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 package ipc

 import (
diff --git a/storagedriver/s3/s3.go b/storagedriver/s3/s3.go
index 3d5cd511..e26d3be2 100644
--- a/storagedriver/s3/s3.go
+++ b/storagedriver/s3/s3.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 package s3

 import (
diff --git a/storagedriver/s3/s3_test.go b/storagedriver/s3/s3_test.go
index 6d7b3ff7..f7b4f80e 100644
--- a/storagedriver/s3/s3_test.go
+++ b/storagedriver/s3/s3_test.go
@@ -1,3 +1,5 @@
+// +build ignore
+
 package s3

 import (
diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go
index 0967e2db..0e4f5be1 100644
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -13,7 +13,6 @@ import (
 "time"

 "github.com/docker/docker-registry/storagedriver"
- "github.com/docker/docker-registry/storagedriver/ipc"

 "gopkg.in/check.v1"
 )
@@ -33,29 +32,34 @@ func RegisterInProcessSuite(driverConstructor DriverConstructor, skipCheck SkipC
 // RegisterIPCSuite registers a storage driver test suite which runs the named
 // driver as a child process with the given parameters.
 func RegisterIPCSuite(driverName string, ipcParams map[string]string, skipCheck SkipCheck) {
- suite := &DriverSuite{
- Constructor: func() (storagedriver.StorageDriver, error) {
- d, err := ipc.NewDriverClient(driverName, ipcParams)
- if err != nil {
- return nil, err
- }
- err = d.Start()
- if err != nil {
- return nil, err
- }
- return d, nil
- },
- SkipCheck: skipCheck,
- }
- suite.Teardown = func() error {
- if suite.StorageDriver == nil {
- return nil
- }
+ panic("ipc testing is disabled for now")

- driverClient := suite.StorageDriver.(*ipc.StorageDriverClient)
- return driverClient.Stop()
- }
- check.Suite(suite)
+ // NOTE(stevvooe): IPC testing is disabled for now. Uncomment the code
+ // block below and remove the panic when we phase it back in.
+ + // suite := &DriverSuite{ + // Constructor: func() (storagedriver.StorageDriver, error) { + // d, err := ipc.NewDriverClient(driverName, ipcParams) + // if err != nil { + // return nil, err + // } + // err = d.Start() + // if err != nil { + // return nil, err + // } + // return d, nil + // }, + // SkipCheck: skipCheck, + // } + // suite.Teardown = func() error { + // if suite.StorageDriver == nil { + // return nil + // } + + // driverClient := suite.StorageDriver.(*ipc.StorageDriverClient) + // return driverClient.Stop() + // } + // check.Suite(suite) } // SkipCheck is a function used to determine if a test suite should be skipped. @@ -520,9 +524,9 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { // in to WriteStream concurrently without hanging. // TODO(bbland): fix this test... func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { - if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC { - c.Skip("Need to fix out-of-process concurrency") - } + // if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC { + // c.Skip("Need to fix out-of-process concurrency") + // } var wg sync.WaitGroup From 2f78886aac3e93a6395ce1d70b45cc972d94ae02 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 5 Dec 2014 14:34:22 -0800 Subject: [PATCH 097/165] Disable s3 and ipc executable entry points --- cmd/registry-storagedriver-azure/main.go | 2 ++ cmd/registry-storagedriver-filesystem/main.go | 2 ++ cmd/registry-storagedriver-inmemory/main.go | 2 ++ cmd/registry-storagedriver-s3/main.go | 2 ++ cmd/registry/main.go | 1 - 5 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/registry-storagedriver-azure/main.go b/cmd/registry-storagedriver-azure/main.go index b9944342..584699bf 100644 --- a/cmd/registry-storagedriver-azure/main.go +++ b/cmd/registry-storagedriver-azure/main.go @@ -1,3 +1,5 @@ +// +build ignore + package main import ( diff --git a/cmd/registry-storagedriver-filesystem/main.go b/cmd/registry-storagedriver-filesystem/main.go index 5ea1eb70..0e555b61 100644 --- a/cmd/registry-storagedriver-filesystem/main.go +++ b/cmd/registry-storagedriver-filesystem/main.go @@ -1,3 +1,5 @@ +// +build ignore + package main import ( diff --git a/cmd/registry-storagedriver-inmemory/main.go b/cmd/registry-storagedriver-inmemory/main.go index 77b1c530..b75d3694 100644 --- a/cmd/registry-storagedriver-inmemory/main.go +++ b/cmd/registry-storagedriver-inmemory/main.go @@ -1,3 +1,5 @@ +// +build ignore + package main import ( diff --git a/cmd/registry-storagedriver-s3/main.go b/cmd/registry-storagedriver-s3/main.go index 21192a0f..e2234b7b 100644 --- a/cmd/registry-storagedriver-s3/main.go +++ b/cmd/registry-storagedriver-s3/main.go @@ -1,3 +1,5 @@ +// +build ignore + package main import ( diff --git a/cmd/registry/main.go b/cmd/registry/main.go index 150c7d6b..29fa24c1 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -15,7 +15,6 @@ import ( "github.com/docker/docker-registry/configuration" _ "github.com/docker/docker-registry/storagedriver/filesystem" _ "github.com/docker/docker-registry/storagedriver/inmemory" - _ "github.com/docker/docker-registry/storagedriver/s3" ) func main() { From 1a75fccb43c9bf3cbe2c14a807c20727a988de96 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 5 Dec 2014 14:34:54 -0800 Subject: [PATCH 098/165] Address PathNotFoundError in (*manifestStore).Exists Exists was returning an error when encountering a PathNotFoundError when it should just return false without an error. 
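The error-translation idiom is worth isolating. A hedged sketch follows: the
helper name is illustrative, and only the type switch itself comes from the
patch below:

    package storage

    import (
        "github.com/docker/docker-registry/storagedriver"
    )

    // statExists shows the idiom applied to (*manifestStore).Exists below:
    // a PathNotFoundError from the driver simply means "no", while any
    // other error is a real failure that must be surfaced to the caller.
    func statExists(driver storagedriver.StorageDriver, path string) (bool, error) {
        if _, err := driver.Stat(path); err != nil {
            switch err.(type) {
            case storagedriver.PathNotFoundError:
                return false, nil
            default:
                return false, err
            }
        }
        return true, nil
    }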
--- storage/manifeststore.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/storage/manifeststore.go b/storage/manifeststore.go
index a6bdf3b3..ebbc6b3c 100644
--- a/storage/manifeststore.go
+++ b/storage/manifeststore.go
@@ -24,7 +24,12 @@ func (ms *manifestStore) Exists(name, tag string) (bool, error) {

 fi, err := ms.driver.Stat(p)
 if err != nil {
- return false, err
+ switch err.(type) {
+ case storagedriver.PathNotFoundError:
+ return false, nil
+ default:
+ return false, err
+ }
 }

 if fi.IsDir() {

From 1a25a34c5dd117409771414d5b0e2ebf5ddd15fd Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 5 Dec 2014 19:04:00 -0800
Subject: [PATCH 099/165] Use circleci for continuous builds

---
 circle.yml | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)
 create mode 100644 circle.yml

diff --git a/circle.yml b/circle.yml
new file mode 100644
index 00000000..96fa911a
--- /dev/null
+++ b/circle.yml
@@ -0,0 +1,63 @@
+machine:
+ pre:
+ - curl -o go.tar.gz -sL https://golang.org/dl/go1.4rc2.linux-amd64.tar.gz
+ - sudo rm -rf /usr/local/go
+ - sudo tar -C /usr/local -xzf go.tar.gz
+ - sudo chmod a+w /usr/local/go/src/
+
+ hosts:
+ fancy: 127.0.0.1
+
+dependencies:
+ post:
+ - go get github.com/axw/gocov/gocov
+ - go get github.com/mattn/goveralls
+ - go get github.com/golang/lint/golint
+
+test:
+ pre:
+ - go version
+ override:
+ - test -z $(gofmt -s -l . | tee /dev/stderr)
+ - go vet ./...
+ - test -z $(golint ./... | tee /dev/stderr)
+ - go test -race -test.v ./...:
+ timeout: 600
+
+ # TODO(stevvooe): The following is an attempt at using goveralls but it
+ # just doesn't work. goveralls requires a single profile file to be
+ # submitted at once, but can't run all of the tests for all the packages
+ # at once. The command below attempts to fix this but fails because it
+ # creates a new "job" for each run of coveralls, making it so that the
+ # coverage is partially reported a large number of times.
+
+ # - cd $HOME/.go_project/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME && go list ./... | xargs -I{} goveralls -service circleci -repotoken $COVERALLS_TOKEN -race {}
+
+general:
+ branches:
+ ignore:
+ - master
+ - 0.7
+ - 0.8
+ - 0.9
+ - 1.0
+ # This doesn't work, but it would be nice if it did.
+ # build_dir: ../.go_project/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
+
+# notify:
+# email:
+# recipients:
+# - distribution@docker.com
+
+# slack:
+# team: docker
+# channel: "#dt"
+# username: mom
+# token: {{SLACK_TOKEN}}
+# on_success: false
+# on_failure: true
+
+ # Do we want these as well?
+ # - go get code.google.com/p/go.tools/cmd/goimports
+ # - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
+ # http://labix.org/gocheck

From 14e7adb3a041159a4465aeb939b5454795986cd5 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 5 Dec 2014 19:20:42 -0800
Subject: [PATCH 100/165] Add documentation for (*DriverSuite).TestStatCall

---
 storagedriver/testsuites/testsuites.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go
index 0e4f5be1..f745781e 100644
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -468,6 +468,7 @@ func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
 c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }

+// TestStatCall verifies the implementation of the storagedriver's Stat call.
func (suite *DriverSuite) TestStatCall(c *check.C) { content := randomString(4096) dirPath := randomString(32) From e15e07cb40d6fad71446930190c0b2ad99110a24 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 5 Dec 2014 20:20:01 -0800 Subject: [PATCH 101/165] Disable race detector during tests due to memory usage The tests are using way too much memory with the race detector enabled causing the build machines to fall over. Cursory profiling shows no leaks but it may need a closer look. For now, it will be disabled but this cannot be permanent. --- circle.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/circle.yml b/circle.yml index 96fa911a..16a6c817 100644 --- a/circle.yml +++ b/circle.yml @@ -21,8 +21,11 @@ test: - test -z $(gofmt -s -l . | tee /dev/stderr) - go vet ./... - test -z $(golint ./... | tee /dev/stderr) - - go test -race -test.v ./...: - timeout: 600 + - go test -test.v ./... + + # Disabling the race detector due to massive memory usage. + # - go test -race -test.v ./...: + # timeout: 600 # TODO(stevvooe): The following is an attempt at using goveralls but it # just doesn't work. goveralls requires a single profile file to be From e364e71aab30910734835ce3c81cb382ad3c6c69 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 8 Dec 2014 09:51:59 -0800 Subject: [PATCH 102/165] Address go vet declaration issue in tests --- client/client_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/client_test.go b/client/client_test.go index 57578c81..d4a335ec 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -91,7 +91,7 @@ func TestPush(t *testing.T) { } handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{ - testutil.RequestResponseMapping{ + { Request: testutil.Request{ Method: "PUT", Route: "/v2/" + name + "/manifest/" + tag, @@ -184,7 +184,7 @@ func TestPull(t *testing.T) { } handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{ - testutil.RequestResponseMapping{ + { Request: testutil.Request{ Method: "GET", Route: "/v2/" + name + "/manifest/" + tag, @@ -307,7 +307,7 @@ func TestPullResume(t *testing.T) { for i := 0; i < 3; i++ { layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMap{ - testutil.RequestResponseMapping{ + { Request: testutil.Request{ Method: "GET", Route: "/v2/" + name + "/manifest/" + tag, From 9e38ca22311b15849ef87c41f88005911fe50ff7 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 8 Dec 2014 15:47:37 -0800 Subject: [PATCH 103/165] Runs s3 storagedriver tests against all regions (excluding gov) --- storagedriver/s3/s3_test.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/storagedriver/s3/s3_test.go b/storagedriver/s3/s3_test.go index f7b4f80e..fd17cd58 100644 --- a/storagedriver/s3/s3_test.go +++ b/storagedriver/s3/s3_test.go @@ -20,32 +20,37 @@ func Test(t *testing.T) { check.TestingT(t) } func init() { accessKey := os.Getenv("AWS_ACCESS_KEY") secretKey := os.Getenv("AWS_SECRET_KEY") - region := os.Getenv("AWS_REGION") bucket := os.Getenv("S3_BUCKET") encrypt := os.Getenv("S3_ENCRYPT") - s3DriverConstructor := func() (storagedriver.StorageDriver, error) { + s3DriverConstructor := func(region aws.Region) (storagedriver.StorageDriver, error) { shouldEncrypt, err := strconv.ParseBool(encrypt) if err != nil { return nil, err } - return New(accessKey, secretKey, aws.GetRegion(region), shouldEncrypt, bucket) 
+ return New(accessKey, secretKey, region, shouldEncrypt, bucket)
 }

 // Skip S3 storage driver tests if environment variable parameters are not provided
 skipCheck := func() string {
- if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" {
- return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests"
+ if accessKey == "" || secretKey == "" || bucket == "" || encrypt == "" {
+ return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, S3_BUCKET, and S3_ENCRYPT to run S3 tests"
 }
 return ""
 }

- testsuites.RegisterInProcessSuite(s3DriverConstructor, skipCheck)
- testsuites.RegisterIPCSuite(driverName, map[string]string{
- "accesskey": accessKey,
- "secretkey": secretKey,
- "region": region,
- "bucket": bucket,
- "encrypt": encrypt,
- }, skipCheck)
+ for _, region := range aws.Regions {
+ if region == aws.USGovWest {
+ continue
+ }
+
+ region := region // pin the loop variable for the deferred constructor call
+ testsuites.RegisterInProcessSuite(func() (storagedriver.StorageDriver, error) {
+ return s3DriverConstructor(region)
+ }, skipCheck)
+ testsuites.RegisterIPCSuite(driverName, map[string]string{
+ "accesskey": accessKey,
+ "secretkey": secretKey,
+ "region": region.Name,
+ "bucket": bucket,
+ "encrypt": encrypt,
+ }, skipCheck)
+ }
 }

From cacf33ab6246ef83299e51831eac710913e9faa2 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Mon, 8 Dec 2014 18:22:08 -0800
Subject: [PATCH 104/165] Uses random paths with slashes for storagedriver
 tests, adds edge cases

This replaces only using flat filenames, to better test nested file behaviors.
Fixed inmemory/mfs.go and filesystem/driver.go after finding bugs with the new
tests and test behavior.

---
 storagedriver/filesystem/driver.go | 4 +
 storagedriver/inmemory/mfs.go | 4 +-
 storagedriver/testsuites/testsuites.go | 237 +++++++++++++++++++------
 3 files changed, 186 insertions(+), 59 deletions(-)

diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go
index 05ec6175..3e352125 100644
--- a/storagedriver/filesystem/driver.go
+++ b/storagedriver/filesystem/driver.go
@@ -203,6 +203,10 @@ func (d *Driver) Move(sourcePath string, destPath string) error {
 return storagedriver.PathNotFoundError{Path: sourcePath}
 }

+ if err := os.MkdirAll(path.Dir(dest), 0755); err != nil {
+ return err
+ }
+
 err := os.Rename(source, dest)
 return err
 }
diff --git a/storagedriver/inmemory/mfs.go b/storagedriver/inmemory/mfs.go
index 5248bbc6..9eeac0da 100644
--- a/storagedriver/inmemory/mfs.go
+++ b/storagedriver/inmemory/mfs.go
@@ -210,7 +210,7 @@ func (d *dir) move(src, dst string) error {
 srcDirname, srcFilename := path.Split(src)
 sp := d.find(srcDirname)

- if sp.path() != srcDirname {
+ if srcDirname != strings.TrimSuffix(sp.path(), "/")+"/" {
 return errNotExists
 }

@@ -237,7 +237,7 @@ func (d *dir) delete(p string) error {
 dirname, filename := path.Split(p)
 parent := d.find(dirname)

- if dirname != parent.path() {
+ if dirname != strings.TrimSuffix(parent.path(), "/")+"/" {
 return errNotExists
 }

diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go
index f745781e..1abda715 100644
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -2,6 +2,7 @@ package testsuites

 import (
 "bytes"
+ "crypto/sha1"
 "io"
 "io/ioutil"
 "math/rand"
 "os"
@@ -109,35 +110,54 @@ func (suite *DriverSuite) TearDownSuite(c *check.C) {

 // TestWriteRead1 tests a simple write-read workflow.
func (suite *DriverSuite) TestWriteRead1(c *check.C) { - filename := randomString(32) + filename := randomPath(32) contents := []byte("a") suite.writeReadCompare(c, filename, contents) } // TestWriteRead2 tests a simple write-read workflow with unicode data. func (suite *DriverSuite) TestWriteRead2(c *check.C) { - filename := randomString(32) + filename := randomPath(32) contents := []byte("\xc3\x9f") suite.writeReadCompare(c, filename, contents) } // TestWriteRead3 tests a simple write-read workflow with a small string. func (suite *DriverSuite) TestWriteRead3(c *check.C) { - filename := randomString(32) - contents := []byte(randomString(32)) + filename := randomPath(32) + contents := randomContents(32) suite.writeReadCompare(c, filename, contents) } // TestWriteRead4 tests a simple write-read workflow with 1MB of data. func (suite *DriverSuite) TestWriteRead4(c *check.C) { - filename := randomString(32) - contents := []byte(randomString(1024 * 1024)) + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) +} + +// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompare(c, filename, contents) +} + +// TestTruncate tests that putting smaller contents than an original file does +// remove the excess contents. +func (suite *DriverSuite) TestTruncate(c *check.C) { + filename := randomPath(32) + contents := randomContents(1024 * 1024) + suite.writeReadCompare(c, filename, contents) + + contents = randomContents(1024) suite.writeReadCompare(c, filename, contents) } // TestReadNonexistent tests reading content from an empty path. func (suite *DriverSuite) TestReadNonexistent(c *check.C) { - filename := randomString(32) + filename := randomPath(32) _, err := suite.StorageDriver.GetContent(filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) @@ -145,7 +165,7 @@ func (suite *DriverSuite) TestReadNonexistent(c *check.C) { // TestWriteReadStreams1 tests a simple write-read streaming workflow. func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { - filename := randomString(32) + filename := randomPath(32) contents := []byte("a") suite.writeReadCompareStreams(c, filename, contents) } @@ -153,7 +173,7 @@ func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { // TestWriteReadStreams2 tests a simple write-read streaming workflow with // unicode data. func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { - filename := randomString(32) + filename := randomPath(32) contents := []byte("\xc3\x9f") suite.writeReadCompareStreams(c, filename, contents) } @@ -161,30 +181,68 @@ func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { // TestWriteReadStreams3 tests a simple write-read streaming workflow with a // small amount of data. func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { - filename := randomString(32) - contents := []byte(randomString(32)) + filename := randomPath(32) + contents := randomContents(32) suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB // of data. 
func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { - filename := randomString(32) - contents := []byte(randomString(1024 * 1024)) + filename := randomPath(32) + contents := randomContents(1024 * 1024) suite.writeReadCompareStreams(c, filename, contents) } +// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the +// storage driver safely. +func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { + filename := randomPath(32) + contents := []byte{0x80, 0x80, 0x80, 0x80} + suite.writeReadCompareStreams(c, filename, contents) +} + +// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage +// driver safely. +func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { + if testing.Short() { + c.Skip("Skipping test in short mode") + } + + filename := randomPath(32) + defer suite.StorageDriver.Delete(firstPart(filename)) + + checksum := sha1.New() + var offset int64 = 0 + var chunkSize int64 = 1024 * 1024 + + for i := 0; i < 5*1024; i++ { + contents := randomContents(chunkSize) + written, err := suite.StorageDriver.WriteStream(filename, offset, io.TeeReader(bytes.NewReader(contents), checksum)) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, chunkSize) + offset += chunkSize + } + reader, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, check.IsNil) + + writtenChecksum := sha1.New() + io.Copy(writtenChecksum, reader) + + c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) +} + // TestReadStreamWithOffset tests that the appropriate data is streamed when // reading with a given offset. func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { - filename := randomString(32) - defer suite.StorageDriver.Delete(filename) + filename := randomPath(32) + defer suite.StorageDriver.Delete(firstPart(filename)) chunkSize := int64(32) - contentsChunk1 := []byte(randomString(chunkSize)) - contentsChunk2 := []byte(randomString(chunkSize)) - contentsChunk3 := []byte(randomString(chunkSize)) + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) err := suite.StorageDriver.PutContent(filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) c.Assert(err, check.IsNil) @@ -256,15 +314,15 @@ func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { // TestContinueStreamAppend tests that a stream write can be appended to without // corrupting the data. func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { - filename := randomString(32) - defer suite.StorageDriver.Delete(filename) + filename := randomPath(32) + defer suite.StorageDriver.Delete(firstPart(filename)) chunkSize := int64(10 * 1024 * 1024) - contentsChunk1 := []byte(randomString(chunkSize)) - contentsChunk2 := []byte(randomString(chunkSize)) - contentsChunk3 := []byte(randomString(chunkSize)) - contentsChunk4 := []byte(randomString(chunkSize)) + contentsChunk1 := randomContents(chunkSize) + contentsChunk2 := randomContents(chunkSize) + contentsChunk3 := randomContents(chunkSize) + contentsChunk4 := randomContents(chunkSize) zeroChunk := make([]byte, int64(chunkSize)) fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) @@ -337,7 +395,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { // TestReadNonexistentStream tests that reading a stream for a nonexistent path // fails. 
func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { - filename := randomString(32) + filename := randomPath(32) _, err := suite.StorageDriver.ReadStream(filename, 0) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) @@ -345,15 +403,15 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { // TestList checks the returned list of keys after populating a directory tree. func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := "/" + randomString(int64(8+rand.Intn(8))) - defer suite.StorageDriver.Delete(rootDirectory) + rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) + defer suite.StorageDriver.Delete("/") - parentDirectory := rootDirectory + "/" + randomString(int64(8+rand.Intn(8))) + parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles := make([]string, 50) for i := 0; i < len(childFiles); i++ { - childFile := parentDirectory + "/" + randomString(int64(8+rand.Intn(8))) + childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles[i] = childFile - err := suite.StorageDriver.PutContent(childFile, []byte(randomString(32))) + err := suite.StorageDriver.PutContent(childFile, randomContents(32)) c.Assert(err, check.IsNil) } sort.Strings(childFiles) @@ -381,12 +439,12 @@ func (suite *DriverSuite) TestList(c *check.C) { // TestMove checks that a moved object no longer exists at the source path and // does exist at the destination. func (suite *DriverSuite) TestMove(c *check.C) { - contents := []byte(randomString(32)) - sourcePath := randomString(32) - destPath := randomString(32) + contents := randomContents(32) + sourcePath := randomPath(32) + destPath := randomPath(32) - defer suite.StorageDriver.Delete(sourcePath) - defer suite.StorageDriver.Delete(destPath) + defer suite.StorageDriver.Delete(firstPart(sourcePath)) + defer suite.StorageDriver.Delete(firstPart(destPath)) err := suite.StorageDriver.PutContent(sourcePath, contents) c.Assert(err, check.IsNil) @@ -405,8 +463,8 @@ func (suite *DriverSuite) TestMove(c *check.C) { // TestMoveNonexistent checks that moving a nonexistent key fails func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { - sourcePath := randomString(32) - destPath := randomString(32) + sourcePath := randomPath(32) + destPath := randomPath(32) err := suite.StorageDriver.Move(sourcePath, destPath) c.Assert(err, check.NotNil) @@ -416,10 +474,10 @@ func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { // TestDelete checks that the delete operation removes data from the storage // driver func (suite *DriverSuite) TestDelete(c *check.C) { - filename := randomString(32) - contents := []byte(randomString(32)) + filename := randomPath(32) + contents := randomContents(32) - defer suite.StorageDriver.Delete(filename) + defer suite.StorageDriver.Delete(firstPart(filename)) err := suite.StorageDriver.PutContent(filename, contents) c.Assert(err, check.IsNil) @@ -434,7 +492,7 @@ func (suite *DriverSuite) TestDelete(c *check.C) { // TestDeleteNonexistent checks that removing a nonexistent key fails. func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { - filename := randomString(32) + filename := randomPath(32) err := suite.StorageDriver.Delete(filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) @@ -442,13 +500,13 @@ func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { // TestDeleteFolder checks that deleting a folder removes all child elements. 
func (suite *DriverSuite) TestDeleteFolder(c *check.C) {
- dirname := randomString(32)
- filename1 := randomString(32)
- filename2 := randomString(32)
- contents := []byte(randomString(32))
+ dirname := randomPath(32)
+ filename1 := randomPath(32)
+ filename2 := randomPath(32)
+ filename3 := randomPath(32)
+ contents := randomContents(32)

- defer suite.StorageDriver.Delete(path.Join(dirname, filename1))
- defer suite.StorageDriver.Delete(path.Join(dirname, filename2))
+ defer suite.StorageDriver.Delete(firstPart(dirname))

 err := suite.StorageDriver.PutContent(path.Join(dirname, filename1), contents)
 c.Assert(err, check.IsNil)

 err = suite.StorageDriver.PutContent(path.Join(dirname, filename2), contents)
 c.Assert(err, check.IsNil)

+ err = suite.StorageDriver.PutContent(path.Join(dirname, filename3), contents)
+ c.Assert(err, check.IsNil)
+
+ err = suite.StorageDriver.Delete(path.Join(dirname, filename1))
+ c.Assert(err, check.IsNil)
+
+ _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
+ c.Assert(err, check.NotNil)
+ c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+ _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
+ c.Assert(err, check.IsNil)
+
+ _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
+ c.Assert(err, check.IsNil)
+
 err = suite.StorageDriver.Delete(dirname)
 c.Assert(err, check.IsNil)

 _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename1))
 c.Assert(err, check.NotNil)
 c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})

 _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename2))
 c.Assert(err, check.NotNil)
 c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
+
+ _, err = suite.StorageDriver.GetContent(path.Join(dirname, filename3))
+ c.Assert(err, check.NotNil)
+ c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{})
 }

 // TestStatCall verifies the implementation of the storagedriver's Stat call.
 func (suite *DriverSuite) TestStatCall(c *check.C) {
- content := randomString(4096)
- dirPath := randomString(32)
- fileName := randomString(32)
+ content := randomContents(4096)
+ dirPath := randomPath(32)
+ fileName := randomFilename(32)
 filePath := path.Join(dirPath, fileName)

+ defer suite.StorageDriver.Delete(dirPath)
+
 // Call on non-existent file/dir, check error.
fi, err := suite.StorageDriver.Stat(filePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(fi, check.IsNil) - err = suite.StorageDriver.PutContent(filePath, []byte(content)) + err = suite.StorageDriver.PutContent(filePath, content) c.Assert(err, check.IsNil) // Call on regular file, check results @@ -555,7 +635,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tfName := path.Base(tf.Name()) defer suite.StorageDriver.Delete(tfName) - contents := []byte(randomString(size)) + contents := randomContents(size) _, err = tf.Write(contents) c.Assert(err, check.IsNil) @@ -578,7 +658,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { } func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(filename) + defer suite.StorageDriver.Delete(firstPart(filename)) err := suite.StorageDriver.PutContent(filename, contents) c.Assert(err, check.IsNil) @@ -590,7 +670,7 @@ func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents } func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(filename) + defer suite.StorageDriver.Delete(firstPart(filename)) nn, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(contents)) c.Assert(err, check.IsNil) @@ -606,12 +686,55 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c c.Assert(readContents, check.DeepEquals, contents) } -var pathChars = []byte("abcdefghijklmnopqrstuvwxyz") +var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") -func randomString(length int64) string { +func randomPath(length int64) string { + path := "" + for int64(len(path)) < length { + chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunk := randomFilename(chunkLength) + path += chunk + if length-int64(len(path)) == 1 { + path += randomFilename(1) + } else if length-int64(len(path)) > 1 { + path += "/" + } + } + return path +} + +func randomFilename(length int64) string { b := make([]byte, length) for i := range b { - b[i] = pathChars[rand.Intn(len(pathChars))] + b[i] = filenameChars[rand.Intn(len(filenameChars))] } return string(b) } + +func randomContents(length int64) []byte { + b := make([]byte, length) + for i := range b { + b[i] = byte(rand.Intn(2 << 8)) + } + return b +} + +func firstPart(filePath string) string { + for { + if filePath[len(filePath)-1] == '/' { + filePath = filePath[:len(filePath)-1] + } + + dir, file := path.Split(filePath) + if dir == "" && file == "" { + return "/" + } + if dir == "" { + return file + } + if file == "" { + return dir + } + filePath = dir + } +} From 45c29be44205f65646246333f3ae1a4701c21151 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 8 Dec 2014 21:08:07 -0800 Subject: [PATCH 105/165] Address bug in inmemory filesystem WriteAt method --- storagedriver/inmemory/mfs.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/storagedriver/inmemory/mfs.go b/storagedriver/inmemory/mfs.go index 5248bbc6..9d75cb0e 100644 --- a/storagedriver/inmemory/mfs.go +++ b/storagedriver/inmemory/mfs.go @@ -294,15 +294,16 @@ func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { } func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { - if len(f.data) > 0 && offset >= int64(len(f.data)) { - // Extend missing region with a zero pad, while also preallocating out to size of p. 
-	pad := offset - int64(len(f.data))
-	size := len(p) + int(pad)
-	f.data = append(f.data, make([]byte, pad, size)...)
+	off := int(offset)
+	if cap(f.data) < off+len(p) {
+		data := make([]byte, len(f.data), off+len(p))
+		copy(data, f.data)
+		f.data = data
 	}

-	f.data = append(f.data, p...)
-	return len(p), nil
+	f.data = f.data[:off+len(p)]
+
+	return copy(f.data[off:off+len(p)], p), nil
 }

 func (f *file) String() string {

From c71089c653eb50a878bdcc7fd5ec7dd8f339a836 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 9 Dec 2014 11:06:51 -0800
Subject: [PATCH 106/165] Implement Tags method on ManifestService

---
 storage/manifest.go      | 10 ++++++++++
 storage/manifest_test.go | 14 ++++++++++++++
 storage/manifeststore.go | 30 ++++++++++++++++++++++++++++++
 storage/paths.go         | 10 ++++++++++
 storage/services.go      |  3 +++
 5 files changed, 67 insertions(+)

diff --git a/storage/manifest.go b/storage/manifest.go
index daeaa39b..6c506244 100644
--- a/storage/manifest.go
+++ b/storage/manifest.go
@@ -13,6 +13,16 @@ import (
 	"github.com/docker/docker-registry/digest"
 )

+// ErrUnknownRepository is returned if the named repository is not known by
+// the registry.
+type ErrUnknownRepository struct {
+	Name string
+}
+
+func (err ErrUnknownRepository) Error() string {
+	return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
 // ErrUnknownManifest is returned if the manifest is not known by the
 // registry.
 type ErrUnknownManifest struct {
diff --git a/storage/manifest_test.go b/storage/manifest_test.go
index e4517943..ea634df8 100644
--- a/storage/manifest_test.go
+++ b/storage/manifest_test.go
@@ -99,6 +99,20 @@ func TestManifestStorage(t *testing.T) {
 	if !reflect.DeepEqual(fetchedManifest, sm) {
 		t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm)
 	}
+
+	// Grab the tags and check that this tagged manifest is present
+	tags, err := ms.Tags(name)
+	if err != nil {
+		t.Fatalf("unexpected error fetching tags: %v", err)
+	}
+
+	if len(tags) != 1 {
+		t.Fatalf("unexpected tags returned: %v", tags)
+	}
+
+	if tags[0] != tag {
+		t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{tag})
+	}
 }

 type layerKey struct {
diff --git a/storage/manifeststore.go b/storage/manifeststore.go
index ebbc6b3c..33b16390 100644
--- a/storage/manifeststore.go
+++ b/storage/manifeststore.go
@@ -3,6 +3,7 @@ package storage
 import (
 	"encoding/json"
 	"fmt"
+	"path"

 	"github.com/docker/docker-registry/storagedriver"
 	"github.com/docker/libtrust"
@@ -16,6 +17,35 @@ type manifestStore struct {

 var _ ManifestService = &manifestStore{}

+func (ms *manifestStore) Tags(name string) ([]string, error) {
+	p, err := ms.pathMapper.path(manifestTagsPath{
+		name: name,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var tags []string
+	entries, err := ms.driver.List(p)
+	if err != nil {
+		logrus.Infof("%#v", err)
+		switch err := err.(type) {
+		case storagedriver.PathNotFoundError:
+			return nil, ErrUnknownRepository{Name: name}
+		default:
+			return nil, err
+		}
+	}
+
+	for _, entry := range entries {
+		_, filename := path.Split(entry)
+
+		tags = append(tags, filename)
+	}
+
+	return tags, nil
+}
+
 func (ms *manifestStore) Exists(name, tag string) (bool, error) {
 	p, err := ms.path(name, tag)
 	if err != nil {
diff --git a/storage/paths.go b/storage/paths.go
index ecc3dd32..a3538b85 100644
--- a/storage/paths.go
+++ b/storage/paths.go
@@ -64,6 +64,8 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 	repoPrefix := append(rootPrefix, "repositories")

 	switch v := spec.(type) {
+	case manifestTagsPath:
+
return path.Join(append(repoPrefix, v.name, "manifests")...), nil case manifestPathSpec: // TODO(sday): May need to store manifest by architecture. return path.Join(append(repoPrefix, v.name, "manifests", v.tag)...), nil @@ -109,6 +111,14 @@ type pathSpec interface { pathSpec() } +// manifestTagsPath describes the path elements required to point to the +// directory with all manifest tags under the repository. +type manifestTagsPath struct { + name string +} + +func (manifestTagsPath) pathSpec() {} + // manifestPathSpec describes the path elements used to build a manifest path. // The contents should be a signed manifest json file. type manifestPathSpec struct { diff --git a/storage/services.go b/storage/services.go index 1f6d5e51..da6d88c5 100644 --- a/storage/services.go +++ b/storage/services.go @@ -52,6 +52,9 @@ func (ss *Services) Manifests() ManifestService { // ManifestService provides operations on image manifests. type ManifestService interface { + // Tags lists the tags under the named repository. + Tags(name string) ([]string, error) + // Exists returns true if the layer exists. Exists(name, tag string) (bool, error) From 6cbd22c5f0844f8d35789f2d98b3815b0481081d Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 9 Dec 2014 13:38:07 -0800 Subject: [PATCH 107/165] Implement tags HTTP API handler --- tags.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/tags.go b/tags.go index d8cea3d3..4916c151 100644 --- a/tags.go +++ b/tags.go @@ -1,8 +1,10 @@ package registry import ( + "encoding/json" "net/http" + "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" ) @@ -22,7 +24,34 @@ type tagsHandler struct { *Context } +type tagsAPIResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + // GetTags returns a json list of tags for a specific image name. func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { - // TODO(stevvooe): Implement this method. + defer r.Body.Close() + manifests := th.services.Manifests() + + tags, err := manifests.Tags(th.Name) + if err != nil { + switch err := err.(type) { + case storage.ErrUnknownRepository: + w.WriteHeader(404) + th.Errors.Push(ErrorCodeUnknownRepository, map[string]string{"name": th.Name}) + default: + th.Errors.PushErr(err) + } + return + } + + enc := json.NewEncoder(w) + if err := enc.Encode(tagsAPIResponse{ + Name: th.Name, + Tags: tags, + }); err != nil { + th.Errors.PushErr(err) + return + } } From 49d13f9a083e01838242476853ec4c752bd658db Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 9 Dec 2014 13:40:44 -0800 Subject: [PATCH 108/165] Move manifest store errors to where they happen --- storage/manifest.go | 49 +--------------------------------------- storage/manifeststore.go | 44 ++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 48 deletions(-) diff --git a/storage/manifest.go b/storage/manifest.go index 6c506244..88782c53 100644 --- a/storage/manifest.go +++ b/storage/manifest.go @@ -3,59 +3,12 @@ package storage import ( "crypto/x509" "encoding/json" - "fmt" - "strings" "github.com/Sirupsen/logrus" - - "github.com/docker/libtrust" - "github.com/docker/docker-registry/digest" + "github.com/docker/libtrust" ) -// ErrUnknownRepository is returned if the named repository is not known by -// the registry. 
-type ErrUnknownRepository struct {
-	Name string
-}
-
-func (err ErrUnknownRepository) Error() string {
-	return fmt.Sprintf("unknown repository name=%s", err.Name)
-}
-
-// ErrUnknownManifest is returned if the manifest is not known by the
-// registry.
-type ErrUnknownManifest struct {
-	Name string
-	Tag  string
-}
-
-func (err ErrUnknownManifest) Error() string {
-	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
-}
-
-// ErrManifestUnverified is returned when the registry is unable to verify
-// the manifest.
-type ErrManifestUnverified struct{}
-
-func (ErrManifestUnverified) Error() string {
-	return fmt.Sprintf("unverified manifest")
-}
-
-// ErrManifestVerification provides a type to collect errors encountered
-// during manifest verification. Currently, it accepts errors of all types,
-// but it may be narrowed to those involving manifest verification.
-type ErrManifestVerification []error
-
-func (errs ErrManifestVerification) Error() string {
-	var parts []string
-	for _, err := range errs {
-		parts = append(parts, err.Error())
-	}
-
-	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
-}
-
 // Versioned provides a struct with just the manifest schemaVersion. Incoming
 // content with unknown schema version can be decoded against this struct to
 // check the version.
diff --git a/storage/manifeststore.go b/storage/manifeststore.go
index 33b16390..c7833a7c 100644
--- a/storage/manifeststore.go
+++ b/storage/manifeststore.go
@@ -4,11 +4,55 @@ import (
 	"encoding/json"
 	"fmt"
 	"path"
+	"strings"

 	"github.com/docker/docker-registry/storagedriver"
 	"github.com/docker/libtrust"
 )

+// ErrUnknownRepository is returned if the named repository is not known by
+// the registry.
+type ErrUnknownRepository struct {
+	Name string
+}
+
+func (err ErrUnknownRepository) Error() string {
+	return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrUnknownManifest is returned if the manifest is not known by the
+// registry.
+type ErrUnknownManifest struct {
+	Name string
+	Tag  string
+}
+
+func (err ErrUnknownManifest) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+	return fmt.Sprintf("unverified manifest")
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + type manifestStore struct { driver storagedriver.StorageDriver pathMapper *pathMapper From 33b2b80a8c01c7c6c066ad5a182fd58519ae42e7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 9 Dec 2014 14:19:07 -0800 Subject: [PATCH 109/165] Remove errant log message --- storage/manifeststore.go | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/manifeststore.go b/storage/manifeststore.go index c7833a7c..69a48d5f 100644 --- a/storage/manifeststore.go +++ b/storage/manifeststore.go @@ -72,7 +72,6 @@ func (ms *manifestStore) Tags(name string) ([]string, error) { var tags []string entries, err := ms.driver.List(p) if err != nil { - logrus.Infof("%#v", err) switch err := err.(type) { case storagedriver.PathNotFoundError: return nil, ErrUnknownRepository{Name: name} From 10e5276c0ec89f2cb349bb8fe8421ae03f0af72a Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 9 Dec 2014 14:19:38 -0800 Subject: [PATCH 110/165] Add error code for unknown repository --- errors.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/errors.go b/errors.go index 9593741d..17758f44 100644 --- a/errors.go +++ b/errors.go @@ -34,6 +34,9 @@ const ( // match the provided tag. ErrorCodeInvalidTag + // ErrorCodeUnknownRepository when the repository name is not known. + ErrorCodeUnknownRepository + // ErrorCodeUnknownManifest returned when image manifest name and tag is // unknown, accompanied by a 404 status. ErrorCodeUnknownManifest @@ -64,6 +67,7 @@ var errorCodeStrings = map[ErrorCode]string{ ErrorCodeInvalidLength: "INVALID_LENGTH", ErrorCodeInvalidName: "INVALID_NAME", ErrorCodeInvalidTag: "INVALID_TAG", + ErrorCodeUnknownRepository: "UNKNOWN_REPOSITORY", ErrorCodeUnknownManifest: "UNKNOWN_MANIFEST", ErrorCodeInvalidManifest: "INVALID_MANIFEST", ErrorCodeUnverifiedManifest: "UNVERIFIED_MANIFEST", @@ -78,6 +82,7 @@ var errorCodesMessages = map[ErrorCode]string{ ErrorCodeInvalidLength: "provided length did not match content length", ErrorCodeInvalidName: "manifest name did not match URI", ErrorCodeInvalidTag: "manifest tag did not match URI", + ErrorCodeUnknownRepository: "repository not known to registry", ErrorCodeUnknownManifest: "manifest not known", ErrorCodeInvalidManifest: "manifest is invalid", ErrorCodeUnverifiedManifest: "manifest failed signature validation", From 722ca3584118a18786898bde42f9368b3749efaf Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Tue, 9 Dec 2014 15:36:26 -0800 Subject: [PATCH 111/165] Test Tags HTTP API methods during manifest upload --- api_test.go | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++ urls.go | 14 ++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/api_test.go b/api_test.go index 41f3de69..15ba0ca6 100644 --- a/api_test.go +++ b/api_test.go @@ -195,6 +195,32 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("expected manifest unknown error: got %v", respErrs) } + tagsURL, err := builder.buildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, 
http.StatusNotFound)
+	dec = json.NewDecoder(resp.Body)
+	if err := dec.Decode(&respErrs); err != nil {
+		t.Fatalf("unexpected error decoding error response: %v", err)
+	}
+
+	if len(respErrs.Errors) == 0 {
+		t.Fatalf("expected errors in response")
+	}
+
+	if respErrs.Errors[0].Code != ErrorCodeUnknownRepository {
+		t.Fatalf("expected repository unknown error: got %v", respErrs)
+	}
+
 	// --------------------------------
 	// Attempt to push unsigned manifest with missing layers
 	unsignedManifest := &storage.Manifest{
@@ -300,6 +326,35 @@ func TestManifestAPI(t *testing.T) {
 	if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) {
 		t.Fatalf("manifests do not match")
 	}
+
+	// Ensure that the tag is listed.
+	resp, err = http.Get(tagsURL)
+	if err != nil {
+		t.Fatalf("unexpected error getting tags: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Check that the tags list is returned successfully
+	checkResponse(t, "getting tags", resp, http.StatusOK)
+	dec = json.NewDecoder(resp.Body)
+
+	var tagsResponse tagsAPIResponse
+
+	if err := dec.Decode(&tagsResponse); err != nil {
+		t.Fatalf("unexpected error decoding tags response: %v", err)
+	}
+
+	if tagsResponse.Name != imageName {
+		t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName)
+	}
+
+	if len(tagsResponse.Tags) != 1 {
+		t.Fatalf("expected some tags in response: %v", tagsResponse.Tags)
+	}
+
+	if tagsResponse.Tags[0] != tag {
+		t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag)
+	}
 }

 func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response {
diff --git a/urls.go b/urls.go
index d9e77f5e..8f34a5b1 100644
--- a/urls.go
+++ b/urls.go
@@ -39,6 +39,20 @@ func newURLBuilderFromString(root string) (*urlBuilder, error) {
 	return newURLBuilder(u), nil
 }

+func (ub *urlBuilder) buildTagsURL(name string) (string, error) {
+	route := clonedRoute(ub.router, routeNameTags)
+
+	tagsURL, err := route.
+		Schemes(ub.url.Scheme).
+		Host(ub.url.Host).
+		URL("name", name)
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
 func (ub *urlBuilder) forManifest(m *storage.Manifest) (string, error) {
 	return ub.buildManifestURL(m.Name, m.Tag)
 }

From 94052ea2130292f47a9f34131d7d5ef8d4ebb7d7 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Tue, 9 Dec 2014 17:20:10 -0800
Subject: [PATCH 112/165] Fixes normalization of inmemory file paths

A normalized path always begins with "/" and never has a trailing slash
unless it is the root directory.
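For reference, the rule stated above reduces to trimming all leading and
trailing slashes and re-adding a single leading one, which also maps the empty
path to "/". A minimal standalone sketch of that rule, assuming only the
standard library (the patch below adds the equivalent normalize helper to the
inmemory driver):

package main

import (
	"fmt"
	"strings"
)

// normalize ensures a leading "/" and strips trailing slashes; the root
// directory comes out as "/". Interior slashes are left untouched.
func normalize(p string) string {
	return "/" + strings.Trim(p, "/")
}

func main() {
	for _, p := range []string{"", "/", "foo/bar/", "/foo//"} {
		fmt.Printf("%q -> %q\n", p, normalize(p))
	}
	// Output:
	// "" -> "/"
	// "/" -> "/"
	// "foo/bar/" -> "/foo/bar"
	// "/foo//" -> "/foo"
}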
--- storagedriver/inmemory/driver.go | 20 ++++++-------------- storagedriver/inmemory/mfs.go | 14 ++++++++------ 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go index 0b68e021..841ce56c 100644 --- a/storagedriver/inmemory/driver.go +++ b/storagedriver/inmemory/driver.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "io/ioutil" - "strings" "sync" "time" @@ -87,7 +86,7 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } - path = d.normalize(path) + path = normalize(path) found := d.root.find(path) if found.path() != path { @@ -111,7 +110,7 @@ func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn in return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } - normalized := d.normalize(path) + normalized := normalize(path) f, err := d.root.mkfile(normalized) if err != nil { @@ -139,7 +138,7 @@ func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) { d.mutex.RLock() defer d.mutex.RUnlock() - normalized := d.normalize(path) + normalized := normalize(path) found := d.root.find(path) if found.path() != normalized { @@ -162,7 +161,7 @@ func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. func (d *Driver) List(path string) ([]string, error) { - normalized := d.normalize(path) + normalized := normalize(path) found := d.root.find(normalized) @@ -192,7 +191,7 @@ func (d *Driver) Move(sourcePath string, destPath string) error { d.mutex.Lock() defer d.mutex.Unlock() - normalizedSrc, normalizedDst := d.normalize(sourcePath), d.normalize(destPath) + normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) err := d.root.move(normalizedSrc, normalizedDst) switch err { @@ -208,7 +207,7 @@ func (d *Driver) Delete(path string) error { d.mutex.Lock() defer d.mutex.Unlock() - normalized := d.normalize(path) + normalized := normalize(path) err := d.root.delete(normalized) switch err { @@ -218,10 +217,3 @@ func (d *Driver) Delete(path string) error { return err } } - -func (d *Driver) normalize(p string) string { - if !strings.HasPrefix(p, "/") { - p = "/" + p // Ghetto path absolution. - } - return p -} diff --git a/storagedriver/inmemory/mfs.go b/storagedriver/inmemory/mfs.go index 9eeac0da..c5797238 100644 --- a/storagedriver/inmemory/mfs.go +++ b/storagedriver/inmemory/mfs.go @@ -11,7 +11,7 @@ import ( var ( errExists = fmt.Errorf("exists") - errNotExists = fmt.Errorf("exists") + errNotExists = fmt.Errorf("notexists") errIsNotDir = fmt.Errorf("notdir") errIsDir = fmt.Errorf("isdir") ) @@ -139,9 +139,7 @@ func (d *dir) mkfile(p string) (*file, error) { // mkdirs creates any missing directory entries in p and returns the result. 
func (d *dir) mkdirs(p string) (*dir, error) { - if p == "" { - p = "/" - } + p = normalize(p) n := d.find(p) @@ -210,7 +208,7 @@ func (d *dir) move(src, dst string) error { srcDirname, srcFilename := path.Split(src) sp := d.find(srcDirname) - if srcDirname != strings.TrimSuffix(sp.path(), "/")+"/" { + if normalize(srcDirname) != normalize(sp.path()) { return errNotExists } @@ -237,7 +235,7 @@ func (d *dir) delete(p string) error { dirname, filename := path.Split(p) parent := d.find(dirname) - if dirname != strings.TrimSuffix(parent.path(), "/")+"/" { + if normalize(dirname) != normalize(parent.path()) { return errNotExists } @@ -327,3 +325,7 @@ func (c *common) path() string { func (c *common) modtime() time.Time { return c.mod } + +func normalize(p string) string { + return "/" + strings.Trim(p, "/") +} From d375e264e1ab012af0ea267fe9c6a65a906e3351 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 9 Dec 2014 18:04:05 -0800 Subject: [PATCH 113/165] Makes circle.yml run tests with the "-test.short" flag This is due to longer tests which require more storage/memory than is granted to us by the circle test runner vms. --- circle.yml | 2 +- storagedriver/testsuites/testsuites.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/circle.yml b/circle.yml index 16a6c817..3309f55b 100644 --- a/circle.yml +++ b/circle.yml @@ -21,7 +21,7 @@ test: - test -z $(gofmt -s -l . | tee /dev/stderr) - go vet ./... - test -z $(golint ./... | tee /dev/stderr) - - go test -test.v ./... + - go test -test.v -test.short ./... # Disabling the race detector due to massive memory usage. # - go test -race -test.v ./...: diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 1abda715..3aa8642c 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -213,7 +213,7 @@ func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { defer suite.StorageDriver.Delete(firstPart(filename)) checksum := sha1.New() - var offset int64 = 0 + var offset int64 var chunkSize int64 = 1024 * 1024 for i := 0; i < 5*1024; i++ { From 8dcec82212c00929e122f8a6e400a9ba6ef49479 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Wed, 10 Dec 2014 10:10:20 -0800 Subject: [PATCH 114/165] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index d6456956..86eb6608 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From bbf288a8088f0ab271e235da1105eead2302ebe8 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 10 Dec 2014 10:51:07 -0800 Subject: [PATCH 115/165] Adds another error test case for reading nonexistent files --- storagedriver/testsuites/testsuites.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 3aa8642c..abf95855 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -396,9 +396,14 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { // fails. 
func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomPath(32) + _, err := suite.StorageDriver.ReadStream(filename, 0) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + + _, err = suite.StorageDriver.ReadStream(filename, 64) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } // TestList checks the returned list of keys after populating a directory tree. From 14a072cd5fcc2edeb1823ef3793a53583c382ac7 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 10 Dec 2014 10:53:51 -0800 Subject: [PATCH 116/165] Improves storagedriver tests for Move command semantics Ensures that Move will properly overwrite the file at the destination location. Also checks that Move of a nonexistent source file will NOT delete the file at the destination. --- storagedriver/testsuites/testsuites.go | 44 ++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index abf95855..bbdc6a1c 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -466,14 +466,54 @@ func (suite *DriverSuite) TestMove(c *check.C) { c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) } -// TestMoveNonexistent checks that moving a nonexistent key fails +// TestMoveOverwrite checks that a moved object no longer exists at the source +// path and overwrites the contents at the destination. +func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { + sourcePath := randomPath(32) + destPath := randomPath(32) + sourceContents := randomContents(32) + destContents := randomContents(64) + + defer suite.StorageDriver.Delete(firstPart(sourcePath)) + defer suite.StorageDriver.Delete(firstPart(destPath)) + + err := suite.StorageDriver.PutContent(sourcePath, sourceContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.PutContent(destPath, destContents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(sourcePath, destPath) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, sourceContents) + + _, err = suite.StorageDriver.GetContent(sourcePath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) +} + +// TestMoveNonexistent checks that moving a nonexistent key fails and does not +// delete the data at the destination path. 
func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { + contents := randomContents(32) sourcePath := randomPath(32) destPath := randomPath(32) - err := suite.StorageDriver.Move(sourcePath, destPath) + defer suite.StorageDriver.Delete(firstPart(destPath)) + + err := suite.StorageDriver.PutContent(destPath, contents) + c.Assert(err, check.IsNil) + + err = suite.StorageDriver.Move(sourcePath, destPath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + + received, err := suite.StorageDriver.GetContent(destPath) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) } // TestDelete checks that the delete operation removes data from the storage From cb25cc65bf6ff07628bde33013657a31bd41ee60 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 10 Dec 2014 10:55:33 -0800 Subject: [PATCH 117/165] Fixes storagedriver Stat test Checks Stat on the directory before creating the file to make sure that it does not exist Properly cleans up after the test. --- storagedriver/testsuites/testsuites.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index bbdc6a1c..d649a173 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -598,10 +598,15 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { fileName := randomFilename(32) filePath := path.Join(dirPath, fileName) - defer suite.StorageDriver.Delete(dirPath) + defer suite.StorageDriver.Delete(firstPart(dirPath)) // Call on non-existent file/dir, check error. - fi, err := suite.StorageDriver.Stat(filePath) + fi, err := suite.StorageDriver.Stat(dirPath) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) + c.Assert(fi, check.IsNil) + + fi, err = suite.StorageDriver.Stat(filePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(fi, check.IsNil) From 9297693675a1c0c93feb1c2a35f71fb4c57f71b4 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 10 Dec 2014 10:57:47 -0800 Subject: [PATCH 118/165] Improves storagedriver concurrency testing Creates trees instead of flat files for TestConcurrentFileStreams Adds TestConcurrentStreamReads, which writes a large file (smaller in Short mode), and then ensures that several concurrent readers properly read their portions of the file with random offsets --- storagedriver/testsuites/testsuites.go | 47 +++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index d649a173..b568a9c8 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -651,9 +651,46 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { } } +// TestConcurrentStreamReads checks that multiple clients can safely read from +// the same file simultaneously with various offsets. 
+func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { + var filesize int64 = 128 * 1024 * 1024 + + if testing.Short() { + filesize = 10 * 1024 * 1024 + c.Log("Reducing file size to 10MB for short mode") + } + + filename := randomPath(32) + contents := randomContents(filesize) + + defer suite.StorageDriver.Delete(firstPart(filename)) + + err := suite.StorageDriver.PutContent(filename, contents) + c.Assert(err, check.IsNil) + + var wg sync.WaitGroup + + readContents := func() { + defer wg.Done() + offset := rand.Int63n(int64(len(contents))) + reader, err := suite.StorageDriver.ReadStream(filename, offset) + c.Assert(err, check.IsNil) + + readContents, err := ioutil.ReadAll(reader) + c.Assert(err, check.IsNil) + c.Assert(readContents, check.DeepEquals, contents[offset:]) + } + + wg.Add(10) + for i := 0; i < 10; i++ { + go readContents() + } + wg.Wait() +} + // TestConcurrentFileStreams checks that multiple *os.File objects can be passed // in to WriteStream concurrently without hanging. -// TODO(bbland): fix this test... func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { // if _, isIPC := suite.StorageDriver.(*ipc.StorageDriverClient); isIPC { // c.Skip("Need to fix out-of-process concurrency") @@ -682,8 +719,8 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { c.Assert(err, check.IsNil) defer os.Remove(tf.Name()) - tfName := path.Base(tf.Name()) - defer suite.StorageDriver.Delete(tfName) + filename := randomPath(32) + defer suite.StorageDriver.Delete(firstPart(filename)) contents := randomContents(size) @@ -693,11 +730,11 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tf.Sync() tf.Seek(0, os.SEEK_SET) - nn, err := suite.StorageDriver.WriteStream(tfName, 0, tf) + nn, err := suite.StorageDriver.WriteStream(filename, 0, tf) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, size) - reader, err := suite.StorageDriver.ReadStream(tfName, 0) + reader, err := suite.StorageDriver.ReadStream(filename, 0) c.Assert(err, check.IsNil) defer reader.Close() From 8a555bbb5fe171f28e1171712550ffc6f5755ce9 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 10 Dec 2014 11:09:14 -0800 Subject: [PATCH 119/165] Attempts to fix go vet for circle --- client/client_test.go | 60 +++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/client/client_test.go b/client/client_test.go index d4a335ec..5eaf6b97 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -90,18 +90,16 @@ func TestPush(t *testing.T) { } } - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{ - { - Request: testutil.Request{ - Method: "PUT", - Route: "/v2/" + name + "/manifest/" + tag, - Body: manifestBytes, - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - }, + handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + name + "/manifest/" + tag, + Body: manifestBytes, }, - }...)) + Response: testutil.Response{ + StatusCode: http.StatusOK, + }, + })) server := httptest.NewServer(handler) client := New(server.URL) objectStore := &memoryObjectStore{ @@ -183,18 +181,16 @@ func TestPull(t *testing.T) { } } - handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMap{ - { - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + name + "/manifest/" + tag, - }, - Response: testutil.Response{ - StatusCode: 
http.StatusOK,
-				Body:       manifestBytes,
-			},
+	handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{
+		Request: testutil.Request{
+			Method: "GET",
+			Route:  "/v2/" + name + "/manifest/" + tag,
 		},
-	}...))
+		Response: testutil.Response{
+			StatusCode: http.StatusOK,
+			Body:       manifestBytes,
+		},
+	}))
 	server := httptest.NewServer(handler)
 	client := New(server.URL)
 	objectStore := &memoryObjectStore{
@@ -306,18 +302,16 @@ func TestPullResume(t *testing.T) {
 	}

 	for i := 0; i < 3; i++ {
-		layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMap{
-			{
-				Request: testutil.Request{
-					Method: "GET",
-					Route:  "/v2/" + name + "/manifest/" + tag,
-				},
-				Response: testutil.Response{
-					StatusCode: http.StatusOK,
-					Body:       manifestBytes,
-				},
+		layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{
+			Request: testutil.Request{
+				Method: "GET",
+				Route:  "/v2/" + name + "/manifest/" + tag,
 			},
-		}...)
+			Response: testutil.Response{
+				StatusCode: http.StatusOK,
+				Body:       manifestBytes,
+			},
+		})
 	}

 	handler := testutil.NewHandler(layerRequestResponseMappings)

From 33d89b4bca7ef5c7e453e427773522cf68d9b3b8 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 9 Dec 2014 21:06:51 -0800
Subject: [PATCH 120/165] Disabling go vet until we get bug fixed

---
 circle.yml | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/circle.yml b/circle.yml
index 3309f55b..c5ce9b08 100644
--- a/circle.yml
+++ b/circle.yml
@@ -19,7 +19,12 @@ test:
     - go version
   override:
     - test -z $(gofmt -s -l . | tee /dev/stderr)
-    - go vet ./...
+
+    # TODO(stevvooe): go vet is complaining about something that can't be
+    # reproduced locally and doesn't make sense based on the existing code.
+    # Turning it off for now.
+    # - go vet ./...
+
     - test -z $(golint ./... | tee /dev/stderr)
     - go test -test.v -test.short ./...

From 7b56d100760ec19616d0f4985a6164022baa0d8e Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 9 Dec 2014 21:25:54 -0800
Subject: [PATCH 121/165] Lock down HTTP API error codes

This commit locks down the set of http error codes that will be part of the
initial V2 specification, proposed in docker/docker#9015. The naming order has
been slightly changed and there are a few tweaks to ensure all conditions are
captured, but this will be the set that docker core will be implemented
against.

To support this, the errors have been moved into an api/errors package. A new
type, ErrorDescriptor, has been defined to centralize the code, message and
definitions used with each type. The information therein can be used to
generate documentation and response code mappings (yet to come...).

In addition to the refactoring that came along with this change, several tests
have been added to ensure serialization round trips are reliable. This allows
better support for using these error types on the client and server side.
This is coupled with some tweaks in the client code to fix issues with error
reporting.

Other fixes in the client include moving client-specific errors out of the
base package and ensuring that we have correct parameters for finishing
uploads.
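Since the commit message describes the envelope that consumers will code
against, a brief client-side sketch may help. The JSON shape
({"errors": [{"code", "message", "detail"}, ...]}) mirrors the Error and
Errors types added in api/errors below; the struct names used here are chosen
only for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// apiError and apiErrors are illustrative stand-ins for the Error and
// Errors types defined in the api/errors package below.
type apiError struct {
	Code    string      `json:"code"`
	Message string      `json:"message,omitempty"`
	Detail  interface{} `json:"detail,omitempty"`
}

type apiErrors struct {
	Errors []apiError `json:"errors,omitempty"`
}

func main() {
	body := `{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`

	var envelope apiErrors
	if err := json.Unmarshal([]byte(body), &envelope); err != nil {
		fmt.Println("decode error:", err)
		return
	}

	for _, e := range envelope.Errors {
		fmt.Printf("%s: %s\n", e.Code, e.Message)
	}
}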
---
 api/errors/descriptors.go | 135 +++++++++++++++++
 api/errors/errors.go      | 193 +++++++++++++++++++++++
 api/errors/errors_test.go | 165 ++++++++++++++++++++
 api_test.go               |  30 ++--
 client/client.go          | 136 ++++++++---------
 client/client_test.go     |  35 ++++-
 client/errors.go          |  79 ++++++++++
 context.go                |   7 +-
 errors.go                 | 311 --------------------------------------
 errors_test.go            |  90 -----------
 images.go                 |  16 +-
 layer.go                  |   7 +-
 layerupload.go            |  23 +--
 tags.go                   |   3 +-
 14 files changed, 716 insertions(+), 514 deletions(-)
 create mode 100644 api/errors/descriptors.go
 create mode 100644 api/errors/errors.go
 create mode 100644 api/errors/errors_test.go
 create mode 100644 client/errors.go
 delete mode 100644 errors.go
 delete mode 100644 errors_test.go

diff --git a/api/errors/descriptors.go b/api/errors/descriptors.go
new file mode 100644
index 00000000..e3dfd37b
--- /dev/null
+++ b/api/errors/descriptors.go
@@ -0,0 +1,135 @@
+package errors
+
+import "net/http"
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// DefaultStatusCode is the status code that should be returned via the
+	// HTTP API. Some errors may have different status codes depending on the
+	// situation.
+	DefaultStatusCode int
+}
+
+var descriptors = []ErrorDescriptor{
+	{
+		Code:    ErrorCodeUnknown,
+		Value:   "UNKNOWN",
+		Message: "unknown error",
+	},
+	{
+		Code:    ErrorCodeDigestInvalid,
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		DefaultStatusCode: http.StatusBadRequest,
+	},
+	{
+		Code:    ErrorCodeSizeInvalid,
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		DefaultStatusCode: http.StatusBadRequest,
+	},
+	{
+		Code:    ErrorCodeNameInvalid,
+		Value:   "NAME_INVALID",
+		Message: "manifest name did not match URI",
+		Description: `During a manifest upload, if the name in the manifest
+		does not match the uri name, this error will be returned.`,
+		DefaultStatusCode: http.StatusBadRequest,
+	},
+	{
+		Code:    ErrorCodeTagInvalid,
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		DefaultStatusCode: http.StatusBadRequest,
+	},
+	{
+		Code:    ErrorCodeNameUnknown,
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		DefaultStatusCode: http.StatusNotFound,
+	},
+	{
+		Code:    ErrorCodeManifestUnknown,
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag is unknown to the repository.`,
+		DefaultStatusCode: http.StatusNotFound,
+	},
+	{
+		Code:    ErrorCodeManifestInvalid,
+		Value:   "MANIFEST_INVALID",
+		Message: "manifest invalid",
+		Description: `During upload, manifests undergo several checks ensuring
+		validity. If those checks fail, this error may be returned, unless a
+		more specific error is included.`,
+		DefaultStatusCode: http.StatusBadRequest,
+	},
+	{
+		Code:    ErrorCodeManifestUnverified,
+		Value:   "MANIFEST_UNVERIFIED",
+		Message: "manifest failed signature verification",
+		Description: `During manifest upload, if the manifest fails signature
+		verification, this error will be returned.`,
+		DefaultStatusCode: http.StatusBadRequest,
+	},
+	{
+		Code:    ErrorCodeBlobUnknown,
+		Value:   "BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a blob is unknown to the
+		registry in a specified repository. This can be returned with a
+		standard get or if a manifest references an unknown layer during
+		upload.`,
+		DefaultStatusCode: http.StatusNotFound,
+	},
+
+	{
+		Code:    ErrorCodeBlobUploadUnknown,
+		Value:   "BLOB_UPLOAD_UNKNOWN",
+		Message: "blob upload unknown to registry",
+		Description: `If a blob upload has been cancelled or was never
+		started, this error code may be returned.`,
+		DefaultStatusCode: http.StatusNotFound,
+	},
+}
+
+var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor
+var idToDescriptors map[string]ErrorDescriptor
+
+func init() {
+	errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(descriptors))
+	idToDescriptors = make(map[string]ErrorDescriptor, len(descriptors))
+
+	for _, descriptor := range descriptors {
+		errorCodeToDescriptors[descriptor.Code] = descriptor
+		idToDescriptors[descriptor.Value] = descriptor
+	}
+}
diff --git a/api/errors/errors.go b/api/errors/errors.go
new file mode 100644
index 00000000..b6e64e2a
--- /dev/null
+++ b/api/errors/errors.go
@@ -0,0 +1,193 @@
+// Package errors describes the error codes that may be returned via the
+// Docker Registry JSON HTTP API V2. In addition to declarations,
+// descriptions about the error codes and the conditions causing them are
+// available in detail.
+//
+// Error definitions here are considered to be locked down for the V2 registry
+// api. Any changes must be considered carefully and should not proceed
+// without a change proposal in docker core.
+package errors
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+const (
+	// ErrorCodeUnknown is a catch-all for errors not defined below.
+	ErrorCodeUnknown ErrorCode = iota
+
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// size does not match the content length.
+	ErrorCodeSizeInvalid
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid
+
+	// ErrorCodeNameUnknown when the repository name is not known.
+	ErrorCodeNameUnknown
+
+	// ErrorCodeManifestUnknown returned when image manifest is unknown.
+	ErrorCodeManifestUnknown
+
+	// ErrorCodeManifestInvalid returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
+	ErrorCodeManifestInvalid
+
+	// ErrorCodeManifestUnverified is returned when the manifest fails
+	// signature verification.
+	ErrorCodeManifestUnverified
+
+	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
+	// registry. This can happen when the manifest references a nonexistent
+	// layer or the result is not found by a blob fetch.
+	ErrorCodeBlobUnknown
+
+	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+	ErrorCodeBlobUploadUnknown
+)
+
+// ParseErrorCode attempts to parse the error code string, returning
+// ErrorCodeUnknown if the error is not known.
+func ParseErrorCode(s string) ErrorCode {
+	desc, ok := idToDescriptors[s]
+
+	if !ok {
+		return ErrorCodeUnknown
+	}
+
+	return desc.Code
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message,omitempty"`
+	Detail  interface{} `json:"detail,omitempty"`
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s",
+		strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
+		e.Message)
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors struct {
+	Errors []Error `json:"errors,omitempty"`
+}
+
+// Push pushes an error on to the error stack, with the optional detail
+// argument. It is a programming error (ie panic) to push more than one
+// detail at a time.
+func (errs *Errors) Push(code ErrorCode, details ...interface{}) {
+	if len(details) > 1 {
+		panic("please specify zero or one detail items for this error")
+	}
+
+	var detail interface{}
+	if len(details) > 0 {
+		detail = details[0]
+	}
+
+	if err, ok := detail.(error); ok {
+		detail = err.Error()
+	}
+
+	errs.PushErr(Error{
+		Code:    code,
+		Message: code.Message(),
+		Detail:  detail,
+	})
+}
+
+// PushErr pushes an error interface onto the error stack.
+func (errs *Errors) PushErr(err error) {
+	switch err.(type) {
+	case Error:
+		errs.Errors = append(errs.Errors, err.(Error))
+	default:
+		errs.Errors = append(errs.Errors, Error{Message: err.Error()})
+	}
+}
+
+func (errs *Errors) Error() string {
+	switch errs.Len() {
+	case 0:
+		return ""
+	case 1:
+		return errs.Errors[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs.Errors {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Clear clears the errors.
+func (errs *Errors) Clear() {
+	errs.Errors = errs.Errors[:0]
+}
+
+// Len returns the current number of errors.
+func (errs *Errors) Len() int {
+	return len(errs.Errors)
+}
diff --git a/api/errors/errors_test.go b/api/errors/errors_test.go
new file mode 100644
index 00000000..f0712bef
--- /dev/null
+++ b/api/errors/errors_test.go
@@ -0,0 +1,165 @@
+package errors
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+
+	"github.com/docker/docker-registry/digest"
+)
+
+// TestErrorCodes ensures that error code format, mappings, and
+// marshaling/unmarshaling round trips are stable.
+func TestErrorCodes(t *testing.T) {
+	for _, desc := range descriptors {
+		if desc.Code.String() != desc.Value {
+			t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value)
+		}
+
+		if desc.Code.Message() != desc.Message {
+			t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message)
+		}
+
+		// Serialize the error code using the json library to ensure that we
+		// get a string and it works round trip.
+		p, err := json.Marshal(desc.Code)
+
+		if err != nil {
+			t.Fatalf("error marshaling error code %v: %v", desc.Code, err)
+		}
+
+		if len(p) <= 0 {
+			t.Fatalf("expected content in marshaled form for error code %v", desc.Code)
+		}
+
+		// First, unmarshal to interface and ensure we have a string.
+		var ecUnspecified interface{}
+		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
+		}
+
+		if _, ok := ecUnspecified.(string); !ok {
+			t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified)
+		}
+
+		// Now, unmarshal with the error code type and ensure they are equal
+		var ecUnmarshaled ErrorCode
+		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
+		}
+
+		if ecUnmarshaled != desc.Code {
+			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code)
+		}
+	}
+}
+
+// TestErrorsManagement does a quick check of the Errors type to ensure that
+// members are properly pushed and marshaled.
+func TestErrorsManagement(t *testing.T) {
+	var errs Errors
+
+	errs.Push(ErrorCodeDigestInvalid)
+	errs.Push(ErrorCodeBlobUnknown,
+		map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"})
+
+	p, err := json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marshaling errors: %v", err)
+	}
+
+	expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}"
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+
+	errs.Clear()
+	errs.Push(ErrorCodeUnknown)
+	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
+	p, err = json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marshaling errors: %v", err)
+	}
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+}
+
+// TestMarshalUnmarshal ensures that api errors can round trip through json
+// without losing information.
+func TestMarshalUnmarshal(t *testing.T) {
+
+	var errors Errors
+
+	for _, testcase := range []struct {
+		description string
+		err         Error
+	}{
+		{
+			description: "unknown error",
+			err: Error{
+
+				Code:    ErrorCodeUnknown,
+				Message: ErrorCodeUnknown.Descriptor().Message,
+			},
+		},
+		{
+			description: "unknown manifest",
+			err: Error{
+				Code:    ErrorCodeManifestUnknown,
+				Message: ErrorCodeManifestUnknown.Descriptor().Message,
+			},
+		},
+		{
+			description: "unknown blob",
+			err: Error{
+				Code:    ErrorCodeBlobUnknown,
+				Message: ErrorCodeBlobUnknown.Descriptor().Message,
+				Detail:  map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"},
+			},
+		},
+	} {
+		fatalf := func(format string, args ...interface{}) {
+			t.Fatalf(testcase.description+": "+format, args...)
+		}
+
+		unexpectedErr := func(err error) {
+			fatalf("unexpected error: %v", err)
+		}
+
+		p, err := json.Marshal(testcase.err)
+		if err != nil {
+			unexpectedErr(err)
+		}
+
+		var unmarshaled Error
+		if err := json.Unmarshal(p, &unmarshaled); err != nil {
+			unexpectedErr(err)
+		}
+
+		if !reflect.DeepEqual(unmarshaled, testcase.err) {
+			fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err)
+		}
+
+		// Roll everything up into an error response envelope.
+		errors.PushErr(testcase.err)
+	}
+
+	p, err := json.Marshal(errors)
+	if err != nil {
+		t.Fatalf("unexpected error marshaling error envelope: %v", err)
+	}
+
+	var unmarshaled Errors
+	if err := json.Unmarshal(p, &unmarshaled); err != nil {
+		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
+	}
+
+	if !reflect.DeepEqual(unmarshaled, errors) {
+		t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors)
+	}
+}
diff --git a/api_test.go b/api_test.go
index 15ba0ca6..2c832a17 100644
--- a/api_test.go
+++ b/api_test.go
@@ -12,16 +12,14 @@ import (
 	"os"
 	"testing"

-	"github.com/docker/libtrust"
-
-	"github.com/docker/docker-registry/storage"
-	_ "github.com/docker/docker-registry/storagedriver/inmemory"
-
-	"github.com/gorilla/handlers"
-
+	"github.com/docker/docker-registry/api/errors"
 	"github.com/docker/docker-registry/common/testutil"
 	"github.com/docker/docker-registry/configuration"
 	"github.com/docker/docker-registry/digest"
+	"github.com/docker/docker-registry/storage"
+	_ "github.com/docker/docker-registry/storagedriver/inmemory"
+	"github.com/docker/libtrust"
+	"github.com/gorilla/handlers"
 )

 // TestLayerAPI conducts a full test of the layer api.
@@ -133,6 +131,10 @@ func TestLayerAPI(t *testing.T) {
 	if !verifier.Verified() {
 		t.Fatalf("response body did not pass verification")
 	}
+
+	// Missing tests:
+	// - Upload the same tarsum file under a different repository and
+	//   ensure the content remains uncorrupted.
 }

 func TestManifestAPI(t *testing.T) {
@@ -180,9 +182,7 @@ func TestManifestAPI(t *testing.T) {
 	// }

 	dec := json.NewDecoder(resp.Body)
-	var respErrs struct {
-		Errors []Error
-	}
+	var respErrs errors.Errors
 	if err := dec.Decode(&respErrs); err != nil {
 		t.Fatalf("unexpected error decoding error response: %v", err)
 	}
@@ -191,7 +191,7 @@ func TestManifestAPI(t *testing.T) {
 		t.Fatalf("expected errors in response")
 	}

-	if respErrs.Errors[0].Code != ErrorCodeUnknownManifest {
+	if respErrs.Errors[0].Code != errors.ErrorCodeManifestUnknown {
 		t.Fatalf("expected manifest unknown error: got %v", respErrs)
 	}

@@ -217,7 +217,7 @@ func TestManifestAPI(t *testing.T) {
 		t.Fatalf("expected errors in response")
 	}

-	if respErrs.Errors[0].Code != ErrorCodeUnknownRepository {
+	if respErrs.Errors[0].Code != errors.ErrorCodeNameUnknown {
 		t.Fatalf("expected repository unknown error: got %v", respErrs)
 	}

@@ -251,11 +251,11 @@ func TestManifestAPI(t *testing.T) {
 	for _, err := range respErrs.Errors {
 		switch err.Code {
-		case ErrorCodeUnverifiedManifest:
+		case errors.ErrorCodeManifestUnverified:
 			unverified++
-		case ErrorCodeUnknownLayer:
+		case errors.ErrorCodeBlobUnknown:
 			missingLayers++
-		case ErrorCodeInvalidDigest:
+		case errors.ErrorCodeDigestInvalid:
 			// TODO(stevvooe): This error isn't quite descriptive enough --
 			// the layer with an invalid digest isn't identified.
 			invalidDigests++
diff --git a/client/client.go b/client/client.go
index e51476cd..0d343209 100644
--- a/client/client.go
+++ b/client/client.go
@@ -10,7 +10,7 @@ import (
 	"regexp"
 	"strconv"

-	"github.com/docker/docker-registry"
+	"github.com/docker/docker-registry/api/errors"
 	"github.com/docker/docker-registry/digest"
 	"github.com/docker/docker-registry/storage"
 )
@@ -94,17 +94,18 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest
 	case response.StatusCode == http.StatusOK:
 		break
 	case response.StatusCode == http.StatusNotFound:
-		return nil, &registry.ImageManifestNotFoundError{Name: name, Tag: tag}
+		return nil, &ImageManifestNotFoundError{Name: name, Tag: tag}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
+
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return nil, err
 		}
-		return nil, errors
+		return nil, &errs
 	default:
-		return nil, &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return nil, &UnexpectedHTTPStatusError{Status: response.Status}
 	}

 	decoder := json.NewDecoder(response.Body)
@@ -118,13 +119,8 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest
 }

 func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.SignedManifest) error {
-	manifestBytes, err := json.Marshal(manifest)
-	if err != nil {
-		return err
-	}
-
 	putRequest, err := http.NewRequest("PUT",
-		r.imageManifestURL(name, tag), bytes.NewReader(manifestBytes))
+		r.imageManifestURL(name, tag), bytes.NewReader(manifest.Raw))
 	if err != nil {
 		return err
 	}
@@ -140,15 +136,16 @@ func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.Signed
 	case response.StatusCode == http.StatusOK:
 		return nil
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errors errors.Errors
 		decoder := json.NewDecoder(response.Body)
 		err = decoder.Decode(&errors)
 		if err != nil {
 			return err
 		}
-		return errors
+
+		return &errors
 	default:
-		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
@@ -170,17 +167,17 @@
 	case response.StatusCode == http.StatusNoContent:
 		break
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.ImageManifestNotFoundError{Name: name, Tag: tag}
+		return &ImageManifestNotFoundError{Name: name, Tag: tag}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return err
 		}
-		return errors
+		return &errs
 	default:
-		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return &UnexpectedHTTPStatusError{Status: response.Status}
 	}

 	return nil
@@ -198,17 +195,17 @@
 	case response.StatusCode == http.StatusOK:
 		break
 	case response.StatusCode == http.StatusNotFound:
-		return nil, &registry.RepositoryNotFoundError{Name: name}
+		return nil, &RepositoryNotFoundError{Name: name}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return nil, err
 		}
-		return nil, errors
+		return nil, &errs
 	default:
-		return nil, &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return nil, &UnexpectedHTTPStatusError{Status: response.Status}
 	}

 	tags := struct {
@@ -235,7 +232,7 @@ func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) {
 	switch {
 	case response.StatusCode == http.StatusOK:
 		lengthHeader := response.Header.Get("Content-Length")
-		length, err := strconv.ParseInt(lengthHeader, 10, 0)
+		length, err := strconv.ParseInt(lengthHeader, 10, 64)
 		if err != nil {
 			return -1, err
 		}
 	case response.StatusCode == http.StatusNotFound:
 		return -1, nil
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return -1, err
 		}
-		return -1, errors
+		return -1, &errs
 	default:
 		response.Body.Close()
-		return -1, &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return -1, &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
@@ -280,18 +277,18 @@ func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (i
 		return response.Body, int(length), nil
 	case response.StatusCode == http.StatusNotFound:
 		response.Body.Close()
-		return nil, 0, &registry.BlobNotFoundError{Name: name, Digest: dgst}
+		return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return nil, 0, err
 		}
-		return nil, 0, errors
+		return nil, 0, &errs
 	default:
 		response.Body.Close()
-		return nil, 0, &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return nil, 0, &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
@@ -315,20 +312,20 @@ func (r *clientImpl) InitiateBlobUpload(name string) (string, error) {
 	// case response.StatusCode == http.StatusNotFound:
 	//	return
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return "", err
 		}
-		return "", errors
+		return "", &errs
 	default:
-		return "", &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return "", &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }

 func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) {
-	response, err := http.Get(fmt.Sprintf("%s%s", r.Endpoint, location))
+	response, err := http.Get(location)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -339,31 +336,30 @@
 	case response.StatusCode == http.StatusNoContent:
 		return parseRangeHeader(response.Header.Get("Range"))
 	case response.StatusCode == http.StatusNotFound:
-		return 0, 0, &registry.BlobUploadNotFoundError{Location: location}
+		return 0, 0, &BlobUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return 0, 0, err
 		}
-		return 0, 0, errors
+		return 0, 0, &errs
 	default:
-		return 0, 0, &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return 0, 0, &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }

 func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, dgst digest.Digest) error {
 	defer blob.Close()

-	putRequest, err := http.NewRequest("PUT",
-		fmt.Sprintf("%s%s", r.Endpoint, location), blob)
+	putRequest, err := http.NewRequest("PUT", location, blob)
 	if err != nil {
 		return err
 	}

 	queryValues := url.Values{}
-	queryValues.Set("length", fmt.Sprint(length))
+	queryValues.Set("size", fmt.Sprint(length))
 	queryValues.Set("digest", dgst.String())
 	putRequest.URL.RawQuery = queryValues.Encode()
@@ -381,17 +377,17 @@ func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int,
 	case response.StatusCode == http.StatusCreated:
 		return nil
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.BlobUploadNotFoundError{Location: location}
+		return &BlobUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return err
 		}
-		return errors
+		return &errs
 	default:
-		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
@@ -426,23 +422,23 @@
 		if err != nil {
 			return err
 		}
-		return &registry.BlobUploadInvalidRangeError{
+		return &BlobUploadInvalidRangeError{
 			Location:       location,
 			LastValidRange: lastValidRange,
 			BlobSize:       blobSize,
 		}
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.BlobUploadNotFoundError{Location: location}
+		return &BlobUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return err
 		}
-		return errors
+		return &errs
 	default:
-		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
@@ -454,7 +450,7 @@ func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst d
 	}

 	queryValues := new(url.Values)
-	queryValues.Set("length", fmt.Sprint(length))
+	queryValues.Set("size", fmt.Sprint(length))
 	queryValues.Set("digest", dgst.String())
 	putRequest.URL.RawQuery = queryValues.Encode()
@@ -474,17 +470,17 @@ func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst d
 	case response.StatusCode == http.StatusCreated:
 		return nil
 	case response.StatusCode == http.StatusNotFound:
-		return &registry.BlobUploadNotFoundError{Location: location}
+		return &BlobUploadNotFoundError{Location: location}
 	case response.StatusCode >= 400 && response.StatusCode < 500:
-		errors := new(registry.Errors)
+		var errs errors.Errors
 		decoder := json.NewDecoder(response.Body)
-		err = decoder.Decode(&errors)
+		err = decoder.Decode(&errs)
 		if err != nil {
 			return err
 		}
-		return errors
+		return &errs
 	default:
-		return &registry.UnexpectedHTTPStatusError{Status: response.Status}
+		return &UnexpectedHTTPStatusError{Status: response.Status}
 	}
 }
@@ -506,17 +502,17 @@
 	case response.StatusCode == http.StatusNoContent:
 		return nil
 	case response.StatusCode == http.StatusNotFound:
-		return
®istry.BlobUploadNotFoundError{Location: location} + return &BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: - errors := new(registry.Errors) + var errs errors.Errors decoder := json.NewDecoder(response.Body) - err = decoder.Decode(&errors) + err = decoder.Decode(&errs) if err != nil { return err } - return errors + return &errs default: - return ®istry.UnexpectedHTTPStatusError{Status: response.Status} + return &UnexpectedHTTPStatusError{Status: response.Status} } } diff --git a/client/client_test.go b/client/client_test.go index 5eaf6b97..979f1313 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -58,7 +58,8 @@ func TestPush(t *testing.T) { }, }, } - manifestBytes, err := json.Marshal(manifest) + var err error + manifest.Raw, err = json.Marshal(manifest) blobRequestResponseMappings := make([]testutil.RequestResponseMapping, 2*len(testBlobs)) for i, blob := range testBlobs { @@ -94,13 +95,25 @@ func TestPush(t *testing.T) { Request: testutil.Request{ Method: "PUT", Route: "/v2/" + name + "/manifest/" + tag, - Body: manifestBytes, + Body: manifest.Raw, }, Response: testutil.Response{ StatusCode: http.StatusOK, }, })) - server := httptest.NewServer(handler) + var server *httptest.Server + + // HACK(stevvooe): Super hack to follow: the request response map approach + // above does not let us correctly format the location header to the + // server url. This handler intercepts and re-writes the location header + // to the server url. + + hack := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w = &headerInterceptingResponseWriter{ResponseWriter: w, serverURL: server.URL} + handler.ServeHTTP(w, r) + }) + + server = httptest.NewServer(hack) client := New(server.URL) objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), @@ -370,3 +383,19 @@ func TestPullResume(t *testing.T) { } } } + +// headerInterceptingResponseWriter is a hacky workaround to re-write the +// location header to have the server url. +type headerInterceptingResponseWriter struct { + http.ResponseWriter + serverURL string +} + +func (hirw *headerInterceptingResponseWriter) WriteHeader(status int) { + location := hirw.Header().Get("Location") + if location != "" { + hirw.Header().Set("Location", hirw.serverURL+location) + } + + hirw.ResponseWriter.WriteHeader(status) +} diff --git a/client/errors.go b/client/errors.go new file mode 100644 index 00000000..9bb2e40f --- /dev/null +++ b/client/errors.go @@ -0,0 +1,79 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker-registry/digest" +) + +// RepositoryNotFoundError is returned when making an operation against a +// repository that does not exist in the registry. +type RepositoryNotFoundError struct { + Name string +} + +func (e *RepositoryNotFoundError) Error() string { + return fmt.Sprintf("No repository found with Name: %s", e.Name) +} + +// ImageManifestNotFoundError is returned when making an operation against a +// given image manifest that does not exist in the registry. +type ImageManifestNotFoundError struct { + Name string + Tag string +} + +func (e *ImageManifestNotFoundError) Error() string { + return fmt.Sprintf("No manifest found with Name: %s, Tag: %s", + e.Name, e.Tag) +} + +// BlobNotFoundError is returned when making an operation against a given image +// layer that does not exist in the registry. 
+type BlobNotFoundError struct { + Name string + Digest digest.Digest +} + +func (e *BlobNotFoundError) Error() string { + return fmt.Sprintf("No blob found with Name: %s, Digest: %s", + e.Name, e.Digest) +} + +// BlobUploadNotFoundError is returned when making a blob upload operation against an +// invalid blob upload location url. +// This may be the result of using a cancelled, completed, or stale upload +// location. +type BlobUploadNotFoundError struct { + Location string +} + +func (e *BlobUploadNotFoundError) Error() string { + return fmt.Sprintf("No blob upload found at Location: %s", e.Location) +} + +// BlobUploadInvalidRangeError is returned when attempting to upload an image +// blob chunk that is out of order. +// This provides the known BlobSize and LastValidRange which can be used to +// resume the upload. +type BlobUploadInvalidRangeError struct { + Location string + LastValidRange int + BlobSize int +} + +func (e *BlobUploadInvalidRangeError) Error() string { + return fmt.Sprintf( + "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", + e.Location, e.LastValidRange, e.BlobSize) +} + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) +} diff --git a/context.go b/context.go index 8d894e0f..a1e47abe 100644 --- a/context.go +++ b/context.go @@ -1,6 +1,9 @@ package registry -import "github.com/Sirupsen/logrus" +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/api/errors" +) // Context should contain the request specific context for use in across // handlers. Resources that don't need to be shared across handlers should not @@ -16,7 +19,7 @@ type Context struct { // Errors is a collection of errors encountered during the request to be // returned to the client API. If errors are added to the collection, the // handler *must not* start the response via http.ResponseWriter. - Errors Errors + Errors errors.Errors // vars contains the extracted gorilla/mux variables that can be used for // assignment. diff --git a/errors.go b/errors.go deleted file mode 100644 index 17758f44..00000000 --- a/errors.go +++ /dev/null @@ -1,311 +0,0 @@ -package registry - -import ( - "fmt" - "strings" - - "github.com/docker/docker-registry/digest" - "github.com/docker/docker-registry/storage" -) - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -const ( - // ErrorCodeUnknown is a catch-all for errors not defined below. - ErrorCodeUnknown ErrorCode = iota - - // The following errors can happen during a layer upload. - - // ErrorCodeInvalidDigest is returned when uploading a layer if the - // provided digest does not match the layer contents. - ErrorCodeInvalidDigest - - // ErrorCodeInvalidLength is returned when uploading a layer if the provided - // length does not match the content length. - ErrorCodeInvalidLength - - // ErrorCodeInvalidName is returned when the name in the manifest does not - // match the provided name. - ErrorCodeInvalidName - - // ErrorCodeInvalidTag is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeInvalidTag - - // ErrorCodeUnknownRepository when the repository name is not known. 
- ErrorCodeUnknownRepository - - // ErrorCodeUnknownManifest returned when image manifest name and tag is - // unknown, accompanied by a 404 status. - ErrorCodeUnknownManifest - - // ErrorCodeInvalidManifest returned when an image manifest is invalid, - // typically during a PUT operation. - ErrorCodeInvalidManifest - - // ErrorCodeUnverifiedManifest is returned when the manifest fails signature - // validation. - ErrorCodeUnverifiedManifest - - // ErrorCodeUnknownLayer is returned when the manifest references a - // nonexistent layer. - ErrorCodeUnknownLayer - - // ErrorCodeUnknownLayerUpload is returned when an upload is accessed. - ErrorCodeUnknownLayerUpload - - // ErrorCodeUntrustedSignature is returned when the manifest is signed by an - // untrusted source. - ErrorCodeUntrustedSignature -) - -var errorCodeStrings = map[ErrorCode]string{ - ErrorCodeUnknown: "UNKNOWN", - ErrorCodeInvalidDigest: "INVALID_DIGEST", - ErrorCodeInvalidLength: "INVALID_LENGTH", - ErrorCodeInvalidName: "INVALID_NAME", - ErrorCodeInvalidTag: "INVALID_TAG", - ErrorCodeUnknownRepository: "UNKNOWN_REPOSITORY", - ErrorCodeUnknownManifest: "UNKNOWN_MANIFEST", - ErrorCodeInvalidManifest: "INVALID_MANIFEST", - ErrorCodeUnverifiedManifest: "UNVERIFIED_MANIFEST", - ErrorCodeUnknownLayer: "UNKNOWN_LAYER", - ErrorCodeUnknownLayerUpload: "UNKNOWN_LAYER_UPLOAD", - ErrorCodeUntrustedSignature: "UNTRUSTED_SIGNATURE", -} - -var errorCodesMessages = map[ErrorCode]string{ - ErrorCodeUnknown: "unknown error", - ErrorCodeInvalidDigest: "provided digest did not match uploaded content", - ErrorCodeInvalidLength: "provided length did not match content length", - ErrorCodeInvalidName: "manifest name did not match URI", - ErrorCodeInvalidTag: "manifest tag did not match URI", - ErrorCodeUnknownRepository: "repository not known to registry", - ErrorCodeUnknownManifest: "manifest not known", - ErrorCodeInvalidManifest: "manifest is invalid", - ErrorCodeUnverifiedManifest: "manifest failed signature validation", - ErrorCodeUnknownLayer: "referenced layer not available", - ErrorCodeUnknownLayerUpload: "cannot resume unknown layer upload", - ErrorCodeUntrustedSignature: "manifest signed by untrusted source", -} - -var stringToErrorCode map[string]ErrorCode - -func init() { - stringToErrorCode = make(map[string]ErrorCode, len(errorCodeStrings)) - - // Build up reverse error code map - for k, v := range errorCodeStrings { - stringToErrorCode[v] = k - } -} - -// ParseErrorCode attempts to parse the error code string, returning -// ErrorCodeUnknown if the error is not known. -func ParseErrorCode(s string) ErrorCode { - ec, ok := stringToErrorCode[s] - - if !ok { - return ErrorCodeUnknown - } - - return ec -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - s, ok := errorCodeStrings[ec] - - if !ok { - return errorCodeStrings[ErrorCodeUnknown] - } - - return s -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - m, ok := errorCodesMessages[ec] - - if !ok { - return errorCodesMessages[ErrorCodeUnknown] - } - - return m -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. 
-func (ec *ErrorCode) UnmarshalText(text []byte) error { - *ec = stringToErrorCode[string(text)] - - return nil -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors struct { - Errors []error `json:"errors,omitempty"` -} - -// Push pushes an error on to the error stack, with the optional detail -// argument. It is a programming error (ie panic) to push more than one -// detail at a time. -func (errs *Errors) Push(code ErrorCode, details ...interface{}) { - if len(details) > 1 { - panic("please specify zero or one detail items for this error") - } - - var detail interface{} - if len(details) > 0 { - detail = details[0] - } - - if err, ok := detail.(error); ok { - detail = err.Error() - } - - errs.PushErr(Error{ - Code: code, - Message: code.Message(), - Detail: detail, - }) -} - -// PushErr pushes an error interface onto the error stack. -func (errs *Errors) PushErr(err error) { - switch err.(type) { - case Error: - errs.Errors = append(errs.Errors, err) - default: - errs.Errors = append(errs.Errors, Error{Message: err.Error()}) - } -} - -func (errs *Errors) Error() string { - switch errs.Len() { - case 0: - return "" - case 1: - return errs.Errors[0].Error() - default: - msg := "errors:\n" - for _, err := range errs.Errors { - msg += err.Error() + "\n" - } - return msg - } -} - -// Clear clears the errors. -func (errs *Errors) Clear() { - errs.Errors = errs.Errors[:0] -} - -// Len returns the current number of errors. -func (errs *Errors) Len() int { - return len(errs.Errors) -} - -// DetailUnknownLayer provides detail for unknown layer errors, returned by -// image manifest push for layers that are not yet transferred. This intended -// to only be used on the backend to return detail for this specific error. -type DetailUnknownLayer struct { - - // Unknown should contain the contents of a layer descriptor, which is a - // single FSLayer currently. - Unknown storage.FSLayer `json:"unknown"` -} - -// RepositoryNotFoundError is returned when making an operation against a -// repository that does not exist in the registry. -type RepositoryNotFoundError struct { - Name string -} - -func (e *RepositoryNotFoundError) Error() string { - return fmt.Sprintf("No repository found with Name: %s", e.Name) -} - -// ImageManifestNotFoundError is returned when making an operation against a -// given image manifest that does not exist in the registry. -type ImageManifestNotFoundError struct { - Name string - Tag string -} - -func (e *ImageManifestNotFoundError) Error() string { - return fmt.Sprintf("No manifest found with Name: %s, Tag: %s", - e.Name, e.Tag) -} - -// BlobNotFoundError is returned when making an operation against a given image -// layer that does not exist in the registry. 
-type BlobNotFoundError struct { - Name string - Digest digest.Digest -} - -func (e *BlobNotFoundError) Error() string { - return fmt.Sprintf("No blob found with Name: %s, Digest: %s", - e.Name, e.Digest) -} - -// BlobUploadNotFoundError is returned when making a blob upload operation against an -// invalid blob upload location url. -// This may be the result of using a cancelled, completed, or stale upload -// location. -type BlobUploadNotFoundError struct { - Location string -} - -func (e *BlobUploadNotFoundError) Error() string { - return fmt.Sprintf("No blob upload found at Location: %s", e.Location) -} - -// BlobUploadInvalidRangeError is returned when attempting to upload an image -// blob chunk that is out of order. -// This provides the known BlobSize and LastValidRange which can be used to -// resume the upload. -type BlobUploadInvalidRangeError struct { - Location string - LastValidRange int - BlobSize int -} - -func (e *BlobUploadInvalidRangeError) Error() string { - return fmt.Sprintf( - "Invalid range provided for upload at Location: %s. Last Valid Range: %d, Blob Size: %d", - e.Location, e.LastValidRange, e.BlobSize) -} - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) -} diff --git a/errors_test.go b/errors_test.go deleted file mode 100644 index e0392eb6..00000000 --- a/errors_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package registry - -import ( - "encoding/json" - "testing" -) - -// TestErrorCodes ensures that error code format, mappings and -// marshaling/unmarshaling. round trips are stable. -func TestErrorCodes(t *testing.T) { - for ec := range errorCodeStrings { - if ec.String() != errorCodeStrings[ec] { - t.Fatalf("error code string incorrect: %q != %q", ec.String(), errorCodeStrings[ec]) - } - - if ec.Message() != errorCodesMessages[ec] { - t.Fatalf("incorrect message for error code %v: %q != %q", ec, ec.Message(), errorCodesMessages[ec]) - } - - // Serialize the error code using the json library to ensure that we - // get a string and it works round trip. - p, err := json.Marshal(ec) - - if err != nil { - t.Fatalf("error marshaling error code %v: %v", ec, err) - } - - if len(p) <= 0 { - t.Fatalf("expected content in marshaled before for error code %v", ec) - } - - // First, unmarshal to interface and ensure we have a string. - var ecUnspecified interface{} - if err := json.Unmarshal(p, &ecUnspecified); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", ec, err) - } - - if _, ok := ecUnspecified.(string); !ok { - t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) - } - - // Now, unmarshal with the error code type and ensure they are equal - var ecUnmarshaled ErrorCode - if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { - t.Fatalf("error unmarshaling error code %v: %v", ec, err) - } - - if ecUnmarshaled != ec { - t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) - } - } -} - -// TestErrorsManagement does a quick check of the Errors type to ensure that -// members are properly pushed and marshaled. 
-func TestErrorsManagement(t *testing.T) { - var errs Errors - - errs.Push(ErrorCodeInvalidDigest) - - var detail DetailUnknownLayer - detail.Unknown.BlobSum = "sometestblobsumdoesntmatter" - - errs.Push(ErrorCodeUnknownLayer, detail) - - p, err := json.Marshal(errs) - - if err != nil { - t.Fatalf("error marashaling errors: %v", err) - } - - expectedJSON := "{\"errors\":[{\"code\":\"INVALID_DIGEST\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"UNKNOWN_LAYER\",\"message\":\"referenced layer not available\",\"detail\":{\"unknown\":{\"blobSum\":\"sometestblobsumdoesntmatter\"}}}]}" - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } - - errs.Clear() - errs.Push(ErrorCodeUnknown) - expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" - p, err = json.Marshal(errs) - - if err != nil { - t.Fatalf("error marashaling errors: %v", err) - } - - if string(p) != expectedJSON { - t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) - } -} diff --git a/images.go b/images.go index 495e193a..74ae067e 100644 --- a/images.go +++ b/images.go @@ -5,8 +5,8 @@ import ( "fmt" "net/http" + "github.com/docker/docker-registry/api/errors" "github.com/docker/docker-registry/digest" - "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" ) @@ -41,7 +41,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http manifest, err := manifests.Get(imh.Name, imh.Tag) if err != nil { - imh.Errors.Push(ErrorCodeUnknownManifest, err) + imh.Errors.Push(errors.ErrorCodeManifestUnknown, err) w.WriteHeader(http.StatusNotFound) return } @@ -58,7 +58,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http var manifest storage.SignedManifest if err := dec.Decode(&manifest); err != nil { - imh.Errors.Push(ErrorCodeInvalidManifest, err) + imh.Errors.Push(errors.ErrorCodeManifestInvalid, err) w.WriteHeader(http.StatusBadRequest) return } @@ -71,14 +71,14 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http for _, verificationError := range err { switch verificationError := verificationError.(type) { case storage.ErrUnknownLayer: - imh.Errors.Push(ErrorCodeUnknownLayer, verificationError.FSLayer) + imh.Errors.Push(errors.ErrorCodeBlobUnknown, verificationError.FSLayer) case storage.ErrManifestUnverified: - imh.Errors.Push(ErrorCodeUnverifiedManifest) + imh.Errors.Push(errors.ErrorCodeManifestUnverified) default: if verificationError == digest.ErrDigestInvalidFormat { // TODO(stevvooe): We need to really need to move all // errors to types. Its much more straightforward. 
- imh.Errors.Push(ErrorCodeInvalidDigest) + imh.Errors.Push(errors.ErrorCodeDigestInvalid) } else { imh.Errors.PushErr(verificationError) } @@ -99,10 +99,10 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h if err := manifests.Delete(imh.Name, imh.Tag); err != nil { switch err := err.(type) { case storage.ErrUnknownManifest: - imh.Errors.Push(ErrorCodeUnknownManifest, err) + imh.Errors.Push(errors.ErrorCodeManifestUnknown, err) w.WriteHeader(http.StatusNotFound) default: - imh.Errors.Push(ErrorCodeUnknown, err) + imh.Errors.Push(errors.ErrorCodeUnknown, err) w.WriteHeader(http.StatusBadRequest) } return diff --git a/layer.go b/layer.go index 4d937c64..4da7723a 100644 --- a/layer.go +++ b/layer.go @@ -3,6 +3,7 @@ package registry import ( "net/http" + "github.com/docker/docker-registry/api/errors" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" @@ -14,7 +15,7 @@ func layerDispatcher(ctx *Context, r *http.Request) http.Handler { if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(ErrorCodeInvalidDigest, err) + ctx.Errors.Push(errors.ErrorCodeDigestInvalid, err) }) } @@ -49,9 +50,9 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { switch err := err.(type) { case storage.ErrUnknownLayer: w.WriteHeader(http.StatusNotFound) - lh.Errors.Push(ErrorCodeUnknownLayer, err.FSLayer) + lh.Errors.Push(errors.ErrorCodeBlobUnknown, err.FSLayer) default: - lh.Errors.Push(ErrorCodeUnknown, err) + lh.Errors.Push(errors.ErrorCodeUnknown, err) } return } diff --git a/layerupload.go b/layerupload.go index d7aaa24f..af8bd457 100644 --- a/layerupload.go +++ b/layerupload.go @@ -7,6 +7,7 @@ import ( "strconv" "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/api/errors" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" @@ -38,7 +39,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logrus.Infof("error resolving upload: %v", err) w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) }) } @@ -66,7 +67,7 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R upload, err := layers.Upload(luh.Name) if err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) return } @@ -75,7 +76,7 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R if err := luh.layerUploadResponse(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) return } w.WriteHeader(http.StatusAccepted) @@ -85,12 +86,12 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(ErrorCodeUnknownLayerUpload) + luh.Errors.Push(errors.ErrorCodeBlobUploadUnknown) } if err := luh.layerUploadResponse(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? 
- luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) return } @@ -102,7 +103,7 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(ErrorCodeUnknownLayerUpload) + luh.Errors.Push(errors.ErrorCodeBlobUploadUnknown) } var finished bool @@ -119,14 +120,14 @@ func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Requ if err := luh.maybeCompleteUpload(w, r); err != nil { if err != errNotReadyToComplete { w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) return } } if err := luh.layerUploadResponse(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) return } @@ -141,7 +142,7 @@ func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Requ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(ErrorCodeUnknownLayerUpload) + luh.Errors.Push(errors.ErrorCodeBlobUploadUnknown) } } @@ -194,14 +195,14 @@ func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *htt func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) { layer, err := luh.Upload.Finish(size, dgst) if err != nil { - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) return } layerURL, err := luh.urlBuilder.forLayer(layer) if err != nil { - luh.Errors.Push(ErrorCodeUnknown, err) + luh.Errors.Push(errors.ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) return } diff --git a/tags.go b/tags.go index 4916c151..12a5062f 100644 --- a/tags.go +++ b/tags.go @@ -4,6 +4,7 @@ import ( "encoding/json" "net/http" + "github.com/docker/docker-registry/api/errors" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" ) @@ -39,7 +40,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { switch err := err.(type) { case storage.ErrUnknownRepository: w.WriteHeader(404) - th.Errors.Push(ErrorCodeUnknownRepository, map[string]string{"name": th.Name}) + th.Errors.Push(errors.ErrorCodeNameUnknown, map[string]string{"name": th.Name}) default: th.Errors.PushErr(err) } From d9c2203f9f39b8f2feceba21b9eaed1702e0f5eb Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 10 Dec 2014 15:27:19 -0800 Subject: [PATCH 122/165] Re-enables "go vet" in circle.yml --- circle.yml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/circle.yml b/circle.yml index c5ce9b08..88125a8d 100644 --- a/circle.yml +++ b/circle.yml @@ -19,13 +19,8 @@ test: - go version override: - test -z $(gofmt -s -l . | tee /dev/stderr) - - # TODO(stevvooe): go vet is complaining about something that can't be - # reproduced locally and doesn't make sense based on the existing code. - # Turning it off for now. - # - go vet ./... - - - test -z $(golint ./... | tee /dev/stderr) + - go vet ./... + - test -z $(golint ./... | tee /dev/stderr) - go test -test.v -test.short ./... # Disabling the race detector due to massive memory usage. 
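A note on the check re-enabled above: go vet reports suspicious constructs that the compiler accepts, which is why the CI suite pairs it with gofmt and golint. A minimal illustration of the class of bug its printf checker catches (hypothetical code, not taken from this repository):

    package main

    import "fmt"

    func main() {
    	count := 3
    	// go vet flags this call: the %s verb does not match the int
    	// argument, even though the program compiles and runs.
    	fmt.Printf("uploaded %s layers\n", count)
    }
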
From 87c10960d23e3fdfef7c9324d70b600b11203a96 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 10 Dec 2014 15:57:41 -0800
Subject: [PATCH 123/165] Initial Dockerfile for running registry

---
 Dockerfile | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 Dockerfile

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..6297b38d
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,14 @@
+FROM golang
+
+COPY . /go/src/github.com/docker/docker-registry
+
+# Fetch any dependencies to run the registry
+RUN go get github.com/docker/docker-registry
+RUN go install github.com/docker/docker-registry/cmd/registry
+
+ENV CONFIG_PATH /etc/docker/registry/config.yml
+COPY ./cmd/registry/config.yml $CONFIG_PATH
+
+EXPOSE 5000
+ENV PATH /go/bin
+CMD registry $CONFIG_PATH
\ No newline at end of file

From f57e54491926aa3bf2653a3c2b088b00a878edac Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Wed, 10 Dec 2014 16:20:14 -0800
Subject: [PATCH 124/165] Increases stress test factor of
 TestConcurrentFileStreams

Also makes this test respect the Short flag, reducing the number of threads
by a factor of 4 and space usage by a factor of 16

Note: this test is probably unreasonable to run on the inmemory driver
without the Short flag

---
 storagedriver/testsuites/testsuites.go | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go
index b568a9c8..16c8cd0e 100644
--- a/storagedriver/testsuites/testsuites.go
+++ b/storagedriver/testsuites/testsuites.go
@@ -696,6 +696,13 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) {
 	// 	c.Skip("Need to fix out-of-process concurrency")
 	// }
 
+	numStreams := 32
+
+	if testing.Short() {
+		numStreams = 8
+		c.Log("Reducing number of streams to 8 for short mode")
+	}
+
 	var wg sync.WaitGroup
 
 	testStream := func(size int64) {
@@ -703,13 +710,10 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) {
 		suite.testFileStreams(c, size)
 	}
 
-	wg.Add(6)
-	go testStream(8 * 1024 * 1024)
-	go testStream(4 * 1024 * 1024)
-	go testStream(2 * 1024 * 1024)
-	go testStream(1024 * 1024)
-	go testStream(1024)
-	go testStream(64)
+	wg.Add(numStreams)
+	for i := numStreams; i > 0; i-- {
+		go testStream(int64(numStreams) * 1024 * 1024)
+	}
 
 	wg.Wait()
 }
@@ -718,6 +722,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) {
 	tf, err := ioutil.TempFile("", "tf")
 	c.Assert(err, check.IsNil)
 	defer os.Remove(tf.Name())
+	defer tf.Close()
 
 	filename := randomPath(32)
 	defer suite.StorageDriver.Delete(firstPart(filename))

From b721b0a15c50aae1d77e50f1211972b03a07f210 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Wed, 10 Dec 2014 16:21:12 -0800
Subject: [PATCH 125/165] Export error descriptors and provide tool to
 generate markdown table

To support accurate specification generation, this changeset includes a
quick and dirty tool to generate a markdown table of error codes generated
by the registry API. Equivalent support for routes will likely follow.
Exported descriptors could be used to generate other documentation, as
well.
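Because the descriptor table becomes exported below, any Go program can consume it, not only the bundled generator. A minimal sketch of such a consumer, assuming nothing beyond the exported names introduced in this patch (errors.Descriptors and the Value and Message fields of ErrorDescriptor):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker-registry/api/errors"
    )

    func main() {
    	// Walk the exported descriptor table and print each error code with
    	// its default message. A documentation generator would render these
    	// rows into a markdown table instead.
    	for _, desc := range errors.Descriptors {
    		fmt.Printf("%s: %s\n", desc.Value, desc.Message)
    	}
    }
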
--- api/errors/descriptors.go | 42 +++++++----- api/errors/errors_test.go | 2 +- cmd/registry-api-doctable-gen/main.go | 95 +++++++++++++++++++++++++++ 3 files changed, 120 insertions(+), 19 deletions(-) create mode 100644 cmd/registry-api-doctable-gen/main.go diff --git a/api/errors/descriptors.go b/api/errors/descriptors.go index e3dfd37b..1d71162f 100644 --- a/api/errors/descriptors.go +++ b/api/errors/descriptors.go @@ -20,16 +20,21 @@ type ErrorDescriptor struct { // for use in documentation. Description string - // DefaultStatusCode should to be returned via the HTTP API. Some error - // may have different status codes depending on the situation. - DefaultStatusCode int + // HTTPStatusCodes provides a list of status under which this error + // condition may arise. If it is empty, the error condition may be seen + // for any status code. + HTTPStatusCodes []int } -var descriptors = []ErrorDescriptor{ +// Descriptors provides a list of HTTP API Error codes that may be encountered +// when interacting with the registry API. +var Descriptors = []ErrorDescriptor{ { Code: ErrorCodeUnknown, Value: "UNKNOWN", Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, }, { Code: ErrorCodeDigestInvalid, @@ -40,7 +45,7 @@ var descriptors = []ErrorDescriptor{ include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest.`, - DefaultStatusCode: http.StatusBadRequest, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeSizeInvalid, @@ -49,7 +54,7 @@ var descriptors = []ErrorDescriptor{ Description: `When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned.`, - DefaultStatusCode: http.StatusBadRequest, + HTTPStatusCodes: []int{http.StatusBadRequest}, }, { Code: ErrorCodeNameInvalid, @@ -57,7 +62,7 @@ var descriptors = []ErrorDescriptor{ Message: "manifest name did not match URI", Description: `During a manifest upload, if the name in the manifest does not match the uri name, this error will be returned.`, - DefaultStatusCode: http.StatusBadRequest, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeTagInvalid, @@ -65,7 +70,7 @@ var descriptors = []ErrorDescriptor{ Message: "manifest tag did not match URI", Description: `During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned.`, - DefaultStatusCode: http.StatusBadRequest, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeNameUnknown, @@ -73,7 +78,7 @@ var descriptors = []ErrorDescriptor{ Message: "repository name not known to registry", Description: `This is returned if the name used during an operation is unknown to the registry.`, - DefaultStatusCode: http.StatusNotFound, + HTTPStatusCodes: []int{http.StatusNotFound}, }, { Code: ErrorCodeManifestUnknown, @@ -81,7 +86,7 @@ var descriptors = []ErrorDescriptor{ Message: "manifest unknown", Description: `This error is returned when the manifest, identified by name and tag is unknown to the repository.`, - DefaultStatusCode: http.StatusNotFound, + HTTPStatusCodes: []int{http.StatusNotFound}, }, { Code: ErrorCodeManifestInvalid, @@ -89,8 +94,9 @@ var descriptors = []ErrorDescriptor{ Message: "manifest invalid", Description: `During upload, manifests undergo several checks ensuring validity. 
If those checks fail, this error may be returned, unless a - more specific error is included.`, - DefaultStatusCode: http.StatusBadRequest, + more specific error is included. The detail will contain information + the failed validation.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, }, { Code: ErrorCodeManifestUnverified, @@ -98,7 +104,7 @@ var descriptors = []ErrorDescriptor{ Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature verification, this error will be returned.`, - DefaultStatusCode: http.StatusBadRequest, + HTTPStatusCodes: []int{http.StatusBadRequest}, }, { Code: ErrorCodeBlobUnknown, @@ -108,7 +114,7 @@ var descriptors = []ErrorDescriptor{ registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload.`, - DefaultStatusCode: http.StatusNotFound, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { @@ -117,7 +123,7 @@ var descriptors = []ErrorDescriptor{ Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never started, this error code may be returned.`, - DefaultStatusCode: http.StatusNotFound, + HTTPStatusCodes: []int{http.StatusNotFound}, }, } @@ -125,10 +131,10 @@ var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor var idToDescriptors map[string]ErrorDescriptor func init() { - errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(descriptors)) - idToDescriptors = make(map[string]ErrorDescriptor, len(descriptors)) + errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(Descriptors)) + idToDescriptors = make(map[string]ErrorDescriptor, len(Descriptors)) - for _, descriptor := range descriptors { + for _, descriptor := range Descriptors { errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor } diff --git a/api/errors/errors_test.go b/api/errors/errors_test.go index f0712bef..7a68fe90 100644 --- a/api/errors/errors_test.go +++ b/api/errors/errors_test.go @@ -11,7 +11,7 @@ import ( // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { - for _, desc := range descriptors { + for _, desc := range Descriptors { if desc.Code.String() != desc.Value { t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) } diff --git a/cmd/registry-api-doctable-gen/main.go b/cmd/registry-api-doctable-gen/main.go new file mode 100644 index 00000000..f76c249e --- /dev/null +++ b/cmd/registry-api-doctable-gen/main.go @@ -0,0 +1,95 @@ +// registry-api-doctable-gen uses various descriptors within the registry code +// base to generate markdown tables for use in documentation. This is only +// meant to facilitate updates to documentation and not as an automated tool. 
+// +// For now, this only includes support for error codes: +// +// $ registry-api-doctable-gen errors +// +package main + +import ( + "fmt" + "io" + "log" + "os" + "reflect" + "strings" + "text/tabwriter" + + "github.com/docker/docker-registry/api/errors" +) + +func main() { + + if len(os.Args) < 2 { + log.Fatalln("please specify a table to generate: (errors)") + } + + switch os.Args[1] { + case "errors": + dumpErrors(os.Stdout) + default: + log.Fatalln("unknown descriptor table:", os.Args[1]) + } + +} + +func dumpErrors(wr io.Writer) { + writer := tabwriter.NewWriter(os.Stdout, 8, 8, 0, '\t', 0) + defer writer.Flush() + + fmt.Fprint(writer, "|") + dtype := reflect.TypeOf(errors.ErrorDescriptor{}) + var fieldsPrinted int + for i := 0; i < dtype.NumField(); i++ { + field := dtype.Field(i) + if field.Name == "Value" { + continue + } + + fmt.Fprint(writer, field.Name, "|") + fieldsPrinted++ + } + + divider := strings.Repeat("-", 8) + var parts []string + for i := 0; i < fieldsPrinted; i++ { + parts = append(parts, divider) + } + divider = strings.Join(parts, "|") + + fmt.Fprintln(writer, "\n"+divider) + + for _, descriptor := range errors.Descriptors { + fmt.Fprint(writer, "|") + + v := reflect.ValueOf(descriptor) + for i := 0; i < dtype.NumField(); i++ { + value := v.Field(i).Interface() + field := v.Type().Field(i) + if field.Name == "Value" { + continue + } else if field.Name == "Description" { + value = strings.Replace(value.(string), "\n", " ", -1) + } else if field.Name == "Code" { + value = fmt.Sprintf("`%s`", value) + } else if field.Name == "HTTPStatusCodes" { + if len(value.([]int)) > 0 { + var codes []string + for _, code := range value.([]int) { + codes = append(codes, fmt.Sprint(code)) + } + value = strings.Join(codes, ", ") + } else { + value = "Any" + } + + } + + fmt.Fprint(writer, value, "|") + } + + fmt.Fprint(writer, "\n") + } +} From 3bbc8ce3c2e39b535cccc2201e9d3ad69287b6ef Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Wed, 10 Dec 2014 17:59:13 -0800 Subject: [PATCH 126/165] Fix address --- cmd/registry/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/registry/config.yml b/cmd/registry/config.yml index 2f9ad0a2..818bedc2 100644 --- a/cmd/registry/config.yml +++ b/cmd/registry/config.yml @@ -2,4 +2,4 @@ version: 0.1 loglevel: debug storage: inmemory http: - addr: localhost:5000 \ No newline at end of file + addr: :5000 From 2a16a2ff6a650db4165464d2e28de1428a069707 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 10 Dec 2014 22:29:58 -0800 Subject: [PATCH 127/165] Pluralize route API paths During the specification period, it was suggested that pluralized object names are more idiomatic in APIs than singular. This changeset simply adopts that preference for the API going forward. The client has been updated to remain compatible. 
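Concretely, the rename adds an "s" to every object segment in the URL space. The constants below sketch the before and after shapes for a hypothetical repository "foo/bar" (illustrative only; the authoritative route templates are in the routes.go diff that follows):

    package main

    import "fmt"

    // Illustrative route shapes only; the tag is a placeholder.
    const (
    	oldManifestPath   = "/v2/foo/bar/manifest/latest"
    	newManifestPath   = "/v2/foo/bar/manifests/latest"
    	oldBlobUploadPath = "/v2/foo/bar/blob/upload/"
    	newBlobUploadPath = "/v2/foo/bar/blobs/uploads/"
    )

    func main() {
    	fmt.Println(oldManifestPath, "->", newManifestPath)
    	fmt.Println(oldBlobUploadPath, "->", newBlobUploadPath)
    }
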
--- client/client.go | 8 ++++---- client/client_test.go | 16 ++++++++-------- routes.go | 8 ++++---- routes_test.go | 24 ++++++++++++------------ 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/client/client.go b/client/client.go index 0d343209..8f31cb4e 100644 --- a/client/client.go +++ b/client/client.go @@ -222,7 +222,7 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) { } func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { - response, err := http.Head(fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, dgst)) + response, err := http.Head(fmt.Sprintf("%s/v2/%s/blobs/%s", r.Endpoint, name, dgst)) if err != nil { return -1, err } @@ -255,7 +255,7 @@ func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { getRequest, err := http.NewRequest("GET", - fmt.Sprintf("%s/v2/%s/blob/%s", r.Endpoint, name, dgst), nil) + fmt.Sprintf("%s/v2/%s/blobs/%s", r.Endpoint, name, dgst), nil) if err != nil { return nil, 0, err } @@ -294,7 +294,7 @@ func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (i func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { postRequest, err := http.NewRequest("POST", - fmt.Sprintf("%s/v2/%s/blob/upload/", r.Endpoint, name), nil) + fmt.Sprintf("%s/v2/%s/blobs/uploads/", r.Endpoint, name), nil) if err != nil { return "", err } @@ -519,7 +519,7 @@ func (r *clientImpl) CancelBlobUpload(location string) error { // imageManifestURL is a helper method for returning the full url to an image // manifest func (r *clientImpl) imageManifestURL(name, tag string) string { - return fmt.Sprintf("%s/v2/%s/manifest/%s", r.Endpoint, name, tag) + return fmt.Sprintf("%s/v2/%s/manifests/%s", r.Endpoint, name, tag) } // parseRangeHeader parses out the offset and length from a returned Range diff --git a/client/client_test.go b/client/client_test.go index 979f1313..f3082141 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -41,7 +41,7 @@ func TestPush(t *testing.T) { // because we can't know which blob will get which location. // It's sort of okay because we're using unique digests, but this needs // to change at some point. 
- uploadLocations[i] = fmt.Sprintf("/v2/%s/blob/test-uuid", name) + uploadLocations[i] = fmt.Sprintf("/v2/%s/blobs/test-uuid", name) blobs[i] = storage.FSLayer{BlobSum: blob.digest} history[i] = storage.ManifestHistory{V1Compatibility: blob.digest.String()} } @@ -66,7 +66,7 @@ func TestPush(t *testing.T) { blobRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + name + "/blob/upload/", + Route: "/v2/" + name + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -94,7 +94,7 @@ func TestPush(t *testing.T) { handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + name + "/manifest/" + tag, + Route: "/v2/" + name + "/manifests/" + tag, Body: manifest.Raw, }, Response: testutil.Response{ @@ -185,7 +185,7 @@ func TestPull(t *testing.T) { blobRequestResponseMappings[i] = testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + name + "/blob/" + blob.digest.String(), + Route: "/v2/" + name + "/blobs/" + blob.digest.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -197,7 +197,7 @@ func TestPull(t *testing.T) { handler := testutil.NewHandler(append(blobRequestResponseMappings, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + name + "/manifest/" + tag, + Route: "/v2/" + name + "/manifests/" + tag, }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -292,7 +292,7 @@ func TestPullResume(t *testing.T) { layerRequestResponseMappings[2*i] = testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + name + "/blob/" + blob.digest.String(), + Route: "/v2/" + name + "/blobs/" + blob.digest.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -305,7 +305,7 @@ func TestPullResume(t *testing.T) { layerRequestResponseMappings[2*i+1] = testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + name + "/blob/" + blob.digest.String(), + Route: "/v2/" + name + "/blobs/" + blob.digest.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -318,7 +318,7 @@ func TestPullResume(t *testing.T) { layerRequestResponseMappings = append(layerRequestResponseMappings, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + name + "/manifest/" + tag, + Route: "/v2/" + name + "/manifests/" + tag, }, Response: testutil.Response{ StatusCode: http.StatusOK, diff --git a/routes.go b/routes.go index 4aa0097f..440473e9 100644 --- a/routes.go +++ b/routes.go @@ -31,7 +31,7 @@ func v2APIRouter() *mux.Router { // PUT /v2//manifest/ Image Manifest Upload the image manifest identified by name and tag. // DELETE /v2//manifest/ Image Manifest Delete the image identified by name and tag. router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/manifest/{tag:" + common.TagNameRegexp.String() + "}"). + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/manifests/{tag:" + common.TagNameRegexp.String() + "}"). Name(routeNameImageManifest) // GET /v2//tags/list Tags Fetch the tags under the repository identified by name. @@ -41,19 +41,19 @@ func v2APIRouter() *mux.Router { // GET /v2//blob/ Layer Fetch the blob identified by digest. router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blob/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}"). 
+ Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}"). Name(routeNameBlob) // POST /v2//blob/upload/ Layer Upload Initiate an upload of the layer identified by tarsum. router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blob/upload/"). + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/uploads/"). Name(routeNameBlobUpload) // GET /v2//blob/upload/ Layer Upload Get the status of the upload identified by tarsum and uuid. // PUT /v2//blob/upload/ Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. // DELETE /v2//blob/upload/ Layer Upload Cancel the upload identified by layer and uuid router. - Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blob/upload/{uuid}"). + Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}"). Name(routeNameBlobUploadResume) return router diff --git a/routes_test.go b/routes_test.go index 9085d302..8c514943 100644 --- a/routes_test.go +++ b/routes_test.go @@ -48,7 +48,7 @@ func TestRouter(t *testing.T) { for _, testcase := range []routeTestCase{ { RouteName: routeNameImageManifest, - RequestURI: "/v2/foo/bar/manifest/tag", + RequestURI: "/v2/foo/bar/manifests/tag", Vars: map[string]string{ "name": "foo/bar", "tag": "tag", @@ -63,7 +63,7 @@ func TestRouter(t *testing.T) { }, { RouteName: routeNameBlob, - RequestURI: "/v2/foo/bar/blob/tarsum.dev+foo:abcdef0919234", + RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", "digest": "tarsum.dev+foo:abcdef0919234", @@ -71,7 +71,7 @@ func TestRouter(t *testing.T) { }, { RouteName: routeNameBlob, - RequestURI: "/v2/foo/bar/blob/sha256:abcdef0919234", + RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", "digest": "sha256:abcdef0919234", @@ -79,14 +79,14 @@ func TestRouter(t *testing.T) { }, { RouteName: routeNameBlobUpload, - RequestURI: "/v2/foo/bar/blob/upload/", + RequestURI: "/v2/foo/bar/blobs/uploads/", Vars: map[string]string{ "name": "foo/bar", }, }, { RouteName: routeNameBlobUploadResume, - RequestURI: "/v2/foo/bar/blob/upload/uuid", + RequestURI: "/v2/foo/bar/blobs/uploads/uuid", Vars: map[string]string{ "name": "foo/bar", "uuid": "uuid", @@ -94,7 +94,7 @@ func TestRouter(t *testing.T) { }, { RouteName: routeNameBlobUploadResume, - RequestURI: "/v2/foo/bar/blob/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", Vars: map[string]string{ "name": "foo/bar", "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", @@ -102,7 +102,7 @@ func TestRouter(t *testing.T) { }, { RouteName: routeNameBlobUploadResume, - RequestURI: "/v2/foo/bar/blob/upload/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", + RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", Vars: map[string]string{ "name": "foo/bar", "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", @@ -113,9 +113,9 @@ func TestRouter(t *testing.T) { // "foo/bar/image/image" and image for "foo/bar/image" with tag // "tags" RouteName: routeNameImageManifest, - RequestURI: "/v2/foo/bar/manifest/manifest/tags", + RequestURI: "/v2/foo/bar/manifests/manifests/tags", Vars: map[string]string{ - "name": "foo/bar/manifest", + "name": "foo/bar/manifests", "tag": "tags", }, }, @@ -123,14 +123,14 @@ func TestRouter(t *testing.T) { // This case presents an ambiguity between foo/bar with tag="tags" // 
and list tags for "foo/bar/manifest" RouteName: routeNameTags, - RequestURI: "/v2/foo/bar/manifest/tags/list", + RequestURI: "/v2/foo/bar/manifests/tags/list", Vars: map[string]string{ - "name": "foo/bar/manifest", + "name": "foo/bar/manifests", }, }, { RouteName: routeNameBlobUploadResume, - RequestURI: "/v2/foo/../../layer/upload/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", StatusCode: http.StatusNotFound, }, } { From 76929fb63f39b5d760bd50e9febc93dab8b06c15 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 10 Dec 2014 22:33:36 -0800 Subject: [PATCH 128/165] Implement V2 API base endpoint This implements a base endpoint that will respond with a 200 OK and an empty json response. Such an endpoint can be used as to ping the v2 service or as an endpoint to check authorization status. --- api_test.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ app.go | 15 +++++++++++++++ routes.go | 6 ++++++ routes_test.go | 5 +++++ urls.go | 14 ++++++++++++++ 5 files changed, 85 insertions(+) diff --git a/api_test.go b/api_test.go index 2c832a17..d6cf34dd 100644 --- a/api_test.go +++ b/api_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/http/httptest" "net/http/httputil" @@ -22,6 +23,50 @@ import ( "github.com/gorilla/handlers" ) +// TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified +// 200 OK response. +func TestCheckAPI(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + + app := NewApp(config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := newURLBuilderFromString(server.URL) + + if err != nil { + t.Fatalf("error creating url builder: %v", err) + } + + baseURL, err := builder.buildBaseURL() + if err != nil { + t.Fatalf("unexpected error building base url: %v", err) + } + + resp, err := http.Get(baseURL) + if err != nil { + t.Fatalf("unexpected error issuing request: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "issuing api base check", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Content-Type": []string{"application/json"}, + "Content-Length": []string{"2"}, + }) + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unexpected error reading response body: %v", err) + } + + if string(p) != "{}" { + t.Fatalf("unexpected response body: %v", string(p)) + } +} + // TestLayerAPI conducts a full of the of the layer api. func TestLayerAPI(t *testing.T) { // TODO(stevvooe): This test code is complete junk but it should cover the diff --git a/app.go b/app.go index 324cad29..76605f1b 100644 --- a/app.go +++ b/app.go @@ -1,6 +1,7 @@ package registry import ( + "fmt" "net/http" "github.com/docker/docker-registry/storagedriver" @@ -38,6 +39,9 @@ func NewApp(configuration configuration.Configuration) *App { } // Register the handler dispatchers. + app.register(routeNameBase, func(ctx *Context, r *http.Request) http.Handler { + return http.HandlerFunc(apiBase) + }) app.register(routeNameImageManifest, imageManifestDispatcher) app.register(routeNameTags, tagsDispatcher) app.register(routeNameBlob, layerDispatcher) @@ -134,3 +138,14 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } }) } + +// apiBase implements a simple yes-man for doing overall checks against the +// api. This can support auth roundtrips to support docker login. 
+func apiBase(w http.ResponseWriter, r *http.Request) { + const emptyJSON = "{}" + // Provide a simple /v2/ 200 OK response with empty json response. + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) + + fmt.Fprint(w, emptyJSON) +} diff --git a/routes.go b/routes.go index 440473e9..b291ee4b 100644 --- a/routes.go +++ b/routes.go @@ -6,6 +6,7 @@ import ( ) const ( + routeNameBase = "base" routeNameImageManifest = "image-manifest" routeNameTags = "tags" routeNameBlob = "blob" @@ -27,6 +28,11 @@ func v2APIRouter() *mux.Router { router := mux.NewRouter(). StrictSlash(true) + // GET /v2/ Check Check that the registry implements API version 2(.1) + router. + Path("/v2/"). + Name(routeNameBase) + // GET /v2//manifest/ Image Manifest Fetch the image manifest identified by name and tag. // PUT /v2//manifest/ Image Manifest Upload the image manifest identified by name and tag. // DELETE /v2//manifest/ Image Manifest Delete the image identified by name and tag. diff --git a/routes_test.go b/routes_test.go index 8c514943..6d684a61 100644 --- a/routes_test.go +++ b/routes_test.go @@ -46,6 +46,11 @@ func TestRouter(t *testing.T) { server := httptest.NewServer(router) for _, testcase := range []routeTestCase{ + { + RouteName: routeNameBase, + RequestURI: "/v2/", + Vars: map[string]string{}, + }, { RouteName: routeNameImageManifest, RequestURI: "/v2/foo/bar/manifests/tag", diff --git a/urls.go b/urls.go index 8f34a5b1..92233da4 100644 --- a/urls.go +++ b/urls.go @@ -39,6 +39,20 @@ func newURLBuilderFromString(root string) (*urlBuilder, error) { return newURLBuilder(u), nil } +func (ub *urlBuilder) buildBaseURL() (string, error) { + route := clonedRoute(ub.router, routeNameBase) + + baseURL, err := route. + Schemes(ub.url.Scheme). + Host(ub.url.Host). + URL() + if err != nil { + return "", err + } + + return baseURL.String(), nil +} + func (ub *urlBuilder) buildTagsURL(name string) (string, error) { route := clonedRoute(ub.router, routeNameTags) From 8a1889efeb0853fef93b3d5d9fbf84e206633d87 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 11 Dec 2014 14:11:47 -0800 Subject: [PATCH 129/165] Enforces a path format for storage drivers (#819) Requires all paths in the inmemory and filesystem drivers to begin with a slash, and then contain only valid path components (2+ alphanumeric characters with optional period, hyphen, and underscore separators) delimited by slashes. Also updates the storage driver test suites to construct paths of this format, and causes the suite to abort if files are not cleaned up after the test run. --- storagedriver/filesystem/driver.go | 34 +++++++++++++++++++ storagedriver/inmemory/driver.go | 34 +++++++++++++++++++ storagedriver/storagedriver.go | 21 ++++++++++++ storagedriver/testsuites/testsuites.go | 46 ++++++++++++++++++-------- 4 files changed, 122 insertions(+), 13 deletions(-) diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go index 3e352125..49a94a50 100644 --- a/storagedriver/filesystem/driver.go +++ b/storagedriver/filesystem/driver.go @@ -56,6 +56,10 @@ func New(rootDirectory string) *Driver { // GetContent retrieves the content stored at "path" as a []byte. 
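+// (sketch) Paths are checked against storagedriver.PathRegexp, for example:
+//
+//	storagedriver.PathRegexp.MatchString("/docker/docker-registry") // true
+//	storagedriver.PathRegexp.MatchString("abc")                     // false: not absolute
+//	storagedriver.PathRegexp.MatchString("/a")                      // false: component too short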
func (d *Driver) GetContent(path string) ([]byte, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + rc, err := d.ReadStream(path, 0) if err != nil { return nil, err @@ -72,6 +76,10 @@ func (d *Driver) GetContent(path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *Driver) PutContent(subPath string, contents []byte) error { + if !storagedriver.PathRegexp.MatchString(subPath) { + return storagedriver.InvalidPathError{Path: subPath} + } + if _, err := d.WriteStream(subPath, 0, bytes.NewReader(contents)); err != nil { return err } @@ -82,6 +90,10 @@ func (d *Driver) PutContent(subPath string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -110,6 +122,10 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.Reader at a location // designated by the given path. func (d *Driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn int64, err error) { + if !storagedriver.PathRegexp.MatchString(subPath) { + return 0, storagedriver.InvalidPathError{Path: subPath} + } + if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: subPath, Offset: offset} } @@ -150,6 +166,10 @@ func (d *Driver) WriteStream(subPath string, offset int64, reader io.Reader) (nn // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. func (d *Driver) Stat(subPath string) (storagedriver.FileInfo, error) { + if !storagedriver.PathRegexp.MatchString(subPath) { + return nil, storagedriver.InvalidPathError{Path: subPath} + } + fullPath := d.fullPath(subPath) fi, err := os.Stat(fullPath) @@ -170,6 +190,10 @@ func (d *Driver) Stat(subPath string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. func (d *Driver) List(subPath string) ([]string, error) { + if !storagedriver.PathRegexp.MatchString(subPath) && subPath != "/" { + return nil, storagedriver.InvalidPathError{Path: subPath} + } + if subPath[len(subPath)-1] != '/' { subPath += "/" } @@ -196,6 +220,12 @@ func (d *Driver) List(subPath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *Driver) Move(sourcePath string, destPath string) error { + if !storagedriver.PathRegexp.MatchString(sourcePath) { + return storagedriver.InvalidPathError{Path: sourcePath} + } else if !storagedriver.PathRegexp.MatchString(destPath) { + return storagedriver.InvalidPathError{Path: destPath} + } + source := d.fullPath(sourcePath) dest := d.fullPath(destPath) @@ -213,6 +243,10 @@ func (d *Driver) Move(sourcePath string, destPath string) error { // Delete recursively deletes all objects stored at "path" and its subpaths. 
func (d *Driver) Delete(subPath string) error { + if !storagedriver.PathRegexp.MatchString(subPath) { + return storagedriver.InvalidPathError{Path: subPath} + } + fullPath := d.fullPath(subPath) _, err := os.Stat(fullPath) diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go index 841ce56c..7481c472 100644 --- a/storagedriver/inmemory/driver.go +++ b/storagedriver/inmemory/driver.go @@ -46,6 +46,10 @@ func New() *Driver { // GetContent retrieves the content stored at "path" as a []byte. func (d *Driver) GetContent(path string) ([]byte, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + d.mutex.RLock() defer d.mutex.RUnlock() @@ -60,6 +64,10 @@ func (d *Driver) GetContent(path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *Driver) PutContent(p string, contents []byte) error { + if !storagedriver.PathRegexp.MatchString(p) { + return storagedriver.InvalidPathError{Path: p} + } + d.mutex.Lock() defer d.mutex.Unlock() @@ -79,6 +87,10 @@ func (d *Driver) PutContent(p string, contents []byte) error { // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + d.mutex.RLock() defer d.mutex.RUnlock() @@ -103,6 +115,10 @@ func (d *Driver) ReadStream(path string, offset int64) (io.ReadCloser, error) { // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn int64, err error) { + if !storagedriver.PathRegexp.MatchString(path) { + return 0, storagedriver.InvalidPathError{Path: path} + } + d.mutex.Lock() defer d.mutex.Unlock() @@ -135,6 +151,10 @@ func (d *Driver) WriteStream(path string, offset int64, reader io.Reader) (nn in // Stat returns info about the provided path. func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) { + if !storagedriver.PathRegexp.MatchString(path) { + return nil, storagedriver.InvalidPathError{Path: path} + } + d.mutex.RLock() defer d.mutex.RUnlock() @@ -161,6 +181,10 @@ func (d *Driver) Stat(path string) (storagedriver.FileInfo, error) { // List returns a list of the objects that are direct descendants of the given // path. func (d *Driver) List(path string) ([]string, error) { + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { + return nil, storagedriver.InvalidPathError{Path: path} + } + normalized := normalize(path) found := d.root.find(normalized) @@ -188,6 +212,12 @@ func (d *Driver) List(path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *Driver) Move(sourcePath string, destPath string) error { + if !storagedriver.PathRegexp.MatchString(sourcePath) { + return storagedriver.InvalidPathError{Path: sourcePath} + } else if !storagedriver.PathRegexp.MatchString(destPath) { + return storagedriver.InvalidPathError{Path: destPath} + } + d.mutex.Lock() defer d.mutex.Unlock() @@ -204,6 +234,10 @@ func (d *Driver) Move(sourcePath string, destPath string) error { // Delete recursively deletes all objects stored at "path" and its subpaths. 
func (d *Driver) Delete(path string) error { + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path} + } + d.mutex.Lock() defer d.mutex.Unlock() diff --git a/storagedriver/storagedriver.go b/storagedriver/storagedriver.go index 339b465a..f86e3d1e 100644 --- a/storagedriver/storagedriver.go +++ b/storagedriver/storagedriver.go @@ -3,6 +3,7 @@ package storagedriver import ( "fmt" "io" + "regexp" "strconv" "strings" ) @@ -72,6 +73,17 @@ type StorageDriver interface { Delete(path string) error } +// PathComponentRegexp is the regular expression which each repository path +// component must match. +// A component of a repository path must be at least two characters, optionally +// separated by periods, dashes or underscores. +var PathComponentRegexp = regexp.MustCompile(`[a-z0-9]+([._-]?[a-z0-9])+`) + +// PathRegexp is the regular expression which each repository path must match. +// A repository path is absolute, beginning with a slash and containing a +// positive number of path components separated by slashes. +var PathRegexp = regexp.MustCompile(`^(/[a-z0-9]+([._-]?[a-z0-9])+)+$`) + // PathNotFoundError is returned when operating on a nonexistent path. type PathNotFoundError struct { Path string @@ -81,6 +93,15 @@ func (err PathNotFoundError) Error() string { return fmt.Sprintf("Path not found: %s", err.Path) } +// InvalidPathError is returned when the provided path is malformed. +type InvalidPathError struct { + Path string +} + +func (err InvalidPathError) Error() string { + return fmt.Sprintf("Invalid path: %s", err.Path) +} + // InvalidOffsetError is returned when attempting to read or write from an // invalid offset. type InvalidOffsetError struct { diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 16c8cd0e..8dcc2953 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -108,6 +108,16 @@ func (suite *DriverSuite) TearDownSuite(c *check.C) { } } +// TearDownTest tears down the gocheck test. +// This causes the suite to abort if any files are left around in the storage +// driver. +func (suite *DriverSuite) TearDownTest(c *check.C) { + files, _ := suite.StorageDriver.List("/") + if len(files) > 0 { + c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) + } +} + // TestWriteRead1 tests a simple write-read workflow. func (suite *DriverSuite) TestWriteRead1(c *check.C) { filename := randomPath(32) @@ -337,7 +347,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) if fi.Size() > chunkSize { - c.Fatalf("Offset too large, %d > %d", fi.Size(), chunkSize) + c.Errorf("Offset too large, %d > %d", fi.Size(), chunkSize) } nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(contentsChunk2)) c.Assert(err, check.IsNil) @@ -349,7 +359,7 @@ func (suite *DriverSuite) TestContinueStreamAppend(c *check.C) { c.Assert(fi.Size(), check.Equals, 2*chunkSize) if fi.Size() > 2*chunkSize { - c.Fatalf("Offset too large, %d > %d", fi.Size(), 2*chunkSize) + c.Errorf("Offset too large, %d > %d", fi.Size(), 2*chunkSize) } nn, err = suite.StorageDriver.WriteStream(filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) @@ -409,7 +419,7 @@ func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { // TestList checks the returned list of keys after populating a directory tree. 
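+// (note) The deferred Delete below targets only the directory created by the
+// test, so the empty-root check in TearDownTest above stays meaningful.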
func (suite *DriverSuite) TestList(c *check.C) { rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.StorageDriver.Delete("/") + defer suite.StorageDriver.Delete(rootDirectory) parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles := make([]string, 50) @@ -625,11 +635,11 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { c.Assert(fi.IsDir(), check.Equals, false) if start.After(fi.ModTime()) { - c.Fatalf("modtime %s before file created (%v)", fi.ModTime(), start) + c.Errorf("modtime %s before file created (%v)", fi.ModTime(), start) } if fi.ModTime().After(expectedModTime) { - c.Fatalf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime) + c.Errorf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime) } // Call on directory @@ -643,11 +653,11 @@ func (suite *DriverSuite) TestStatCall(c *check.C) { c.Assert(fi.IsDir(), check.Equals, true) if start.After(fi.ModTime()) { - c.Fatalf("modtime %s before file created (%v)", fi.ModTime(), start) + c.Errorf("modtime %s before file created (%v)", fi.ModTime(), start) } if fi.ModTime().After(expectedModTime) { - c.Fatalf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime) + c.Errorf("modtime %s after file created (%v)", fi.ModTime(), expectedModTime) } } @@ -779,16 +789,19 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c } var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") +var separatorChars = []byte("._-") func randomPath(length int64) string { - path := "" + path := "/" for int64(len(path)) < length { - chunkLength := rand.Int63n(length-int64(len(path))) + 1 + chunkLength := rand.Int63n(length-int64(len(path)+1)) + 2 chunk := randomFilename(chunkLength) path += chunk if length-int64(len(path)) == 1 { path += randomFilename(1) - } else if length-int64(len(path)) > 1 { + } else if length-int64(len(path)) == 2 { + path += randomFilename(2) + } else if length-int64(len(path)) > 2 { path += "/" } } @@ -797,8 +810,15 @@ func randomPath(length int64) string { func randomFilename(length int64) string { b := make([]byte, length) + wasSeparator := true for i := range b { - b[i] = filenameChars[rand.Intn(len(filenameChars))] + if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { + b[i] = separatorChars[rand.Intn(len(separatorChars))] + wasSeparator = true + } else { + b[i] = filenameChars[rand.Intn(len(filenameChars))] + wasSeparator = false + } } return string(b) } @@ -821,8 +841,8 @@ func firstPart(filePath string) string { if dir == "" && file == "" { return "/" } - if dir == "" { - return file + if dir == "/" || dir == "" { + return "/" + file } if file == "" { return dir From 5fbf08bab531850dc71f95f11d5fb726ad5f2b06 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Thu, 11 Dec 2014 14:54:04 -0800 Subject: [PATCH 130/165] Adds tests for storage driver handling of valid and invalid paths --- storagedriver/testsuites/testsuites.go | 35 ++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index 8dcc2953..c83bf605 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -118,6 +118,41 @@ func (suite *DriverSuite) TearDownTest(c *check.C) { } } +// TestValidPaths checks that various valid file paths are accepted by the +// storage driver. 
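+// Valid paths are absolute and lower-case, with components of two or more
+// characters, e.g. "/docker/docker-registry" (see validFiles below).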
+func (suite *DriverSuite) TestValidPaths(c *check.C) { + contents := randomContents(64) + validFiles := []string{"/aa", "/a.a", "/0-9/abcdefg", "/abcdefg/z.75", "/abc/1.2.3.4.5-6_zyx/123.z", "/docker/docker-registry"} + + for _, filename := range validFiles { + err := suite.StorageDriver.PutContent(filename, contents) + defer suite.StorageDriver.Delete(firstPart(filename)) + c.Assert(err, check.IsNil) + + received, err := suite.StorageDriver.GetContent(filename) + c.Assert(err, check.IsNil) + c.Assert(received, check.DeepEquals, contents) + } +} + +// TestInvalidPaths checks that various invalid file paths are rejected by the +// storage driver. +func (suite *DriverSuite) TestInvalidPaths(c *check.C) { + contents := randomContents(64) + invalidFiles := []string{"/", "abc", "/abc./abc", "/.abc", "/a--b", "/a-.b", "/_.abc", "/a/bcd", "/abc_123/d", "/Docker/docker-registry"} + + for _, filename := range invalidFiles { + err := suite.StorageDriver.PutContent(filename, contents) + defer suite.StorageDriver.Delete(firstPart(filename)) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + + _, err = suite.StorageDriver.GetContent(filename) + c.Assert(err, check.NotNil) + c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) + } +} + // TestWriteRead1 tests a simple write-read workflow. func (suite *DriverSuite) TestWriteRead1(c *check.C) { filename := randomPath(32) From c78d173cf71dce2ed38199c8ead7bcdc573d7512 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 21:03:45 -0800 Subject: [PATCH 131/165] Move routes to urls package To lock down V2 api routing, we are moving it to a separate package, with an exported router creation function and route names. Updates will follow to prepare the package for export. --- routes.go => api/urls/routes.go | 0 routes_test.go => api/urls/routes_test.go | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename routes.go => api/urls/routes.go (100%) rename routes_test.go => api/urls/routes_test.go (100%) diff --git a/routes.go b/api/urls/routes.go similarity index 100% rename from routes.go rename to api/urls/routes.go diff --git a/routes_test.go b/api/urls/routes_test.go similarity index 100% rename from routes_test.go rename to api/urls/routes_test.go From da19114d1a54d65b04e422353155640162d9b728 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 21:08:23 -0800 Subject: [PATCH 132/165] Prepare urls package for exports The route definition files have been prepared for export with documentation. Consts have been updated and tests are now passing for the urls package. --- api/urls/routes.go | 45 ++++++++++++++++++++++------------------- api/urls/routes_test.go | 28 ++++++++++++------------- 2 files changed, 38 insertions(+), 35 deletions(-) diff --git a/api/urls/routes.go b/api/urls/routes.go index b291ee4b..79138a4a 100644 --- a/api/urls/routes.go +++ b/api/urls/routes.go @@ -1,66 +1,69 @@ -package registry +package urls import ( "github.com/docker/docker-registry/common" "github.com/gorilla/mux" ) +// The following are definitions of the name under which all V2 routes are +// registered. These symbols can be used to look up a route based on the name. 
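+// For example, Router().GetRoute(RouteNameBlob) retrieves the blob route
+// (sketch; GetRoute is provided by gorilla/mux).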
const ( - routeNameBase = "base" - routeNameImageManifest = "image-manifest" - routeNameTags = "tags" - routeNameBlob = "blob" - routeNameBlobUpload = "blob-upload" - routeNameBlobUploadResume = "blob-upload-resume" + RouteNameBase = "base" + RouteNameManifest = "manifest" + RouteNameTags = "tags" + RouteNameBlob = "blob" + RouteNameBlobUpload = "blob-upload" + RouteNameBlobUploadChunk = "blob-upload-chunk" ) var allEndpoints = []string{ - routeNameImageManifest, - routeNameTags, - routeNameBlob, - routeNameBlobUpload, - routeNameBlobUploadResume, + RouteNameManifest, + RouteNameTags, + RouteNameBlob, + RouteNameBlobUpload, + RouteNameBlobUploadChunk, } -// v2APIRouter builds a gorilla router with named routes for the various API -// methods. We may export this for use by the client. -func v2APIRouter() *mux.Router { +// Router builds a gorilla router with named routes for the various API +// methods. This can be used directly by both server implementations and +// clients. +func Router() *mux.Router { router := mux.NewRouter(). StrictSlash(true) // GET /v2/ Check Check that the registry implements API version 2(.1) router. Path("/v2/"). - Name(routeNameBase) + Name(RouteNameBase) // GET /v2//manifest/ Image Manifest Fetch the image manifest identified by name and tag. // PUT /v2//manifest/ Image Manifest Upload the image manifest identified by name and tag. // DELETE /v2//manifest/ Image Manifest Delete the image identified by name and tag. router. Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/manifests/{tag:" + common.TagNameRegexp.String() + "}"). - Name(routeNameImageManifest) + Name(RouteNameManifest) // GET /v2//tags/list Tags Fetch the tags under the repository identified by name. router. Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/tags/list"). - Name(routeNameTags) + Name(RouteNameTags) // GET /v2//blob/ Layer Fetch the blob identified by digest. router. Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}"). - Name(routeNameBlob) + Name(RouteNameBlob) // POST /v2//blob/upload/ Layer Upload Initiate an upload of the layer identified by tarsum. router. Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/uploads/"). - Name(routeNameBlobUpload) + Name(RouteNameBlobUpload) // GET /v2//blob/upload/ Layer Upload Get the status of the upload identified by tarsum and uuid. // PUT /v2//blob/upload/ Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. // DELETE /v2//blob/upload/ Layer Upload Cancel the upload identified by layer and uuid router. Path("/v2/{name:" + common.RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}"). - Name(routeNameBlobUploadResume) + Name(RouteNameBlobUploadChunk) return router } diff --git a/api/urls/routes_test.go b/api/urls/routes_test.go index 6d684a61..f2e95270 100644 --- a/api/urls/routes_test.go +++ b/api/urls/routes_test.go @@ -1,4 +1,4 @@ -package registry +package urls import ( "encoding/json" @@ -25,7 +25,7 @@ type routeTestCase struct { // This may go away as the application structure comes together. 
func TestRouter(t *testing.T) { - router := v2APIRouter() + router := Router() testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { testCase := routeTestCase{ @@ -47,12 +47,12 @@ func TestRouter(t *testing.T) { for _, testcase := range []routeTestCase{ { - RouteName: routeNameBase, + RouteName: RouteNameBase, RequestURI: "/v2/", Vars: map[string]string{}, }, { - RouteName: routeNameImageManifest, + RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/tag", Vars: map[string]string{ "name": "foo/bar", @@ -60,14 +60,14 @@ func TestRouter(t *testing.T) { }, }, { - RouteName: routeNameTags, + RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/tags/list", Vars: map[string]string{ "name": "foo/bar", }, }, { - RouteName: routeNameBlob, + RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", @@ -75,7 +75,7 @@ func TestRouter(t *testing.T) { }, }, { - RouteName: routeNameBlob, + RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", @@ -83,14 +83,14 @@ func TestRouter(t *testing.T) { }, }, { - RouteName: routeNameBlobUpload, + RouteName: RouteNameBlobUpload, RequestURI: "/v2/foo/bar/blobs/uploads/", Vars: map[string]string{ "name": "foo/bar", }, }, { - RouteName: routeNameBlobUploadResume, + RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/uuid", Vars: map[string]string{ "name": "foo/bar", @@ -98,7 +98,7 @@ func TestRouter(t *testing.T) { }, }, { - RouteName: routeNameBlobUploadResume, + RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", Vars: map[string]string{ "name": "foo/bar", @@ -106,7 +106,7 @@ func TestRouter(t *testing.T) { }, }, { - RouteName: routeNameBlobUploadResume, + RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", Vars: map[string]string{ "name": "foo/bar", @@ -117,7 +117,7 @@ func TestRouter(t *testing.T) { // Check ambiguity: ensure we can distinguish between tags for // "foo/bar/image/image" and image for "foo/bar/image" with tag // "tags" - RouteName: routeNameImageManifest, + RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/manifests/tags", Vars: map[string]string{ "name": "foo/bar/manifests", @@ -127,14 +127,14 @@ func TestRouter(t *testing.T) { { // This case presents an ambiguity between foo/bar with tag="tags" // and list tags for "foo/bar/manifest" - RouteName: routeNameTags, + RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/manifests/tags/list", Vars: map[string]string{ "name": "foo/bar/manifests", }, }, { - RouteName: routeNameBlobUploadResume, + RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", StatusCode: http.StatusNotFound, }, From 5b13e955119926630d1fe57df3ef9f7444df5e7a Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 21:10:43 -0800 Subject: [PATCH 133/165] Porting registry to use urls package This simply moves the registry app to be using the urls package and its exported route names. This supports locking down exported route definitions for use in client packages. 
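For example, a handler registration now reads

    app.register(urls.RouteNameManifest, imageManifestDispatcher)

in place of the unexported routeNameImageManifest constant.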
--- app.go | 15 ++++++++------- app_test.go | 15 ++++++++------- urls.go | 15 ++++++++------- 3 files changed, 24 insertions(+), 21 deletions(-) diff --git a/app.go b/app.go index 76605f1b..b34a77de 100644 --- a/app.go +++ b/app.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" + "github.com/docker/docker-registry/api/urls" "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/factory" @@ -35,18 +36,18 @@ type App struct { func NewApp(configuration configuration.Configuration) *App { app := &App{ Config: configuration, - router: v2APIRouter(), + router: urls.Router(), } // Register the handler dispatchers. - app.register(routeNameBase, func(ctx *Context, r *http.Request) http.Handler { + app.register(urls.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(apiBase) }) - app.register(routeNameImageManifest, imageManifestDispatcher) - app.register(routeNameTags, tagsDispatcher) - app.register(routeNameBlob, layerDispatcher) - app.register(routeNameBlobUpload, layerUploadDispatcher) - app.register(routeNameBlobUploadResume, layerUploadDispatcher) + app.register(urls.RouteNameManifest, imageManifestDispatcher) + app.register(urls.RouteNameTags, tagsDispatcher) + app.register(urls.RouteNameBlob, layerDispatcher) + app.register(urls.RouteNameBlobUpload, layerUploadDispatcher) + app.register(urls.RouteNameBlobUploadChunk, layerUploadDispatcher) driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) diff --git a/app_test.go b/app_test.go index bb78044a..eb8ec597 100644 --- a/app_test.go +++ b/app_test.go @@ -6,6 +6,7 @@ import ( "net/url" "testing" + "github.com/docker/docker-registry/api/urls" "github.com/docker/docker-registry/configuration" ) @@ -16,10 +17,10 @@ import ( func TestAppDispatcher(t *testing.T) { app := &App{ Config: configuration.Configuration{}, - router: v2APIRouter(), + router: urls.Router(), } server := httptest.NewServer(app) - router := v2APIRouter() + router := urls.Router() serverURL, err := url.Parse(server.URL) if err != nil { @@ -71,33 +72,33 @@ func TestAppDispatcher(t *testing.T) { vars []string }{ { - endpoint: routeNameImageManifest, + endpoint: urls.RouteNameManifest, vars: []string{ "name", "foo/bar", "tag", "sometag", }, }, { - endpoint: routeNameTags, + endpoint: urls.RouteNameTags, vars: []string{ "name", "foo/bar", }, }, { - endpoint: routeNameBlob, + endpoint: urls.RouteNameBlob, vars: []string{ "name", "foo/bar", "digest", "tarsum.v1+bogus:abcdef0123456789", }, }, { - endpoint: routeNameBlobUpload, + endpoint: urls.RouteNameBlobUpload, vars: []string{ "name", "foo/bar", }, }, { - endpoint: routeNameBlobUploadResume, + endpoint: urls.RouteNameBlobUploadChunk, vars: []string{ "name", "foo/bar", "uuid", "theuuid", diff --git a/urls.go b/urls.go index 92233da4..4f294c95 100644 --- a/urls.go +++ b/urls.go @@ -4,6 +4,7 @@ import ( "net/http" "net/url" + "github.com/docker/docker-registry/api/urls" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/mux" @@ -17,7 +18,7 @@ type urlBuilder struct { func newURLBuilder(root *url.URL) *urlBuilder { return &urlBuilder{ url: root, - router: v2APIRouter(), + router: urls.Router(), } } @@ -40,7 +41,7 @@ func newURLBuilderFromString(root string) (*urlBuilder, error) { } func (ub *urlBuilder) buildBaseURL() (string, error) { - route := clonedRoute(ub.router, routeNameBase) + route := clonedRoute(ub.router, urls.RouteNameBase) baseURL, err := route. 
Schemes(ub.url.Scheme). @@ -54,7 +55,7 @@ func (ub *urlBuilder) buildBaseURL() (string, error) { } func (ub *urlBuilder) buildTagsURL(name string) (string, error) { - route := clonedRoute(ub.router, routeNameTags) + route := clonedRoute(ub.router, urls.RouteNameTags) tagsURL, err := route. Schemes(ub.url.Scheme). @@ -72,7 +73,7 @@ func (ub *urlBuilder) forManifest(m *storage.Manifest) (string, error) { } func (ub *urlBuilder) buildManifestURL(name, tag string) (string, error) { - route := clonedRoute(ub.router, routeNameImageManifest) + route := clonedRoute(ub.router, urls.RouteNameManifest) manifestURL, err := route. Schemes(ub.url.Scheme). @@ -90,7 +91,7 @@ func (ub *urlBuilder) forLayer(l storage.Layer) (string, error) { } func (ub *urlBuilder) buildLayerURL(name string, dgst digest.Digest) (string, error) { - route := clonedRoute(ub.router, routeNameBlob) + route := clonedRoute(ub.router, urls.RouteNameBlob) layerURL, err := route. Schemes(ub.url.Scheme). @@ -104,7 +105,7 @@ func (ub *urlBuilder) buildLayerURL(name string, dgst digest.Digest) (string, er } func (ub *urlBuilder) buildLayerUploadURL(name string) (string, error) { - route := clonedRoute(ub.router, routeNameBlobUpload) + route := clonedRoute(ub.router, urls.RouteNameBlobUpload) uploadURL, err := route. Schemes(ub.url.Scheme). @@ -122,7 +123,7 @@ func (ub *urlBuilder) forLayerUpload(layerUpload storage.LayerUpload) (string, e } func (ub *urlBuilder) buildLayerUploadResumeURL(name, uuid string, values ...url.Values) (string, error) { - route := clonedRoute(ub.router, routeNameBlobUploadResume) + route := clonedRoute(ub.router, urls.RouteNameBlobUploadChunk) uploadURL, err := route. Schemes(ub.url.Scheme). From e5b6da80d092a1d20ad0ec4345a638522353a209 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 21:15:02 -0800 Subject: [PATCH 134/165] Move urls.go into urls package --- urls.go => api/urls/urls.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename urls.go => api/urls/urls.go (100%) diff --git a/urls.go b/api/urls/urls.go similarity index 100% rename from urls.go rename to api/urls/urls.go From 9b872ca150e89eac269508e47d844b946b1a9e9d Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 21:57:14 -0800 Subject: [PATCH 135/165] Prepare urls.URLBuilder package for export The URLBuilder is now exported with documentation for its behavior. Its a light wrapper around gorilla mux that avoids one having to remember exact arguments take by each route. --- api/urls/urls.go | 145 +++++++++++++++++++++++------------------------ 1 file changed, 70 insertions(+), 75 deletions(-) diff --git a/api/urls/urls.go b/api/urls/urls.go index 4f294c95..7bfd6335 100644 --- a/api/urls/urls.go +++ b/api/urls/urls.go @@ -1,52 +1,61 @@ -package registry +package urls import ( "net/http" "net/url" - "github.com/docker/docker-registry/api/urls" "github.com/docker/docker-registry/digest" - "github.com/docker/docker-registry/storage" "github.com/gorilla/mux" ) -type urlBuilder struct { - url *url.URL // url root (ie http://localhost/) +// URLBuilder creates registry API urls from a single base endpoint. It can be +// used to create urls for use in a registry client or server. +// +// All urls will be created from the given base, including the api version. +// For example, if a root of "/foo/" is provided, urls generated will be fall +// under "/foo/v2/...". Most application will only provide a schema, host and +// port, such as "https://localhost:5000/". 
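+//
+// A usage sketch (repository name and tag values are illustrative):
+//
+//	ub, _ := NewURLBuilderFromString("https://localhost:5000/")
+//	manifestURL, _ := ub.BuildManifestURL("library/ubuntu", "14.04")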
+type URLBuilder struct { + root *url.URL // url root (ie http://localhost/) router *mux.Router } -func newURLBuilder(root *url.URL) *urlBuilder { - return &urlBuilder{ - url: root, - router: urls.Router(), +// NewURLBuilder creates a URLBuilder with provided root url object. +func NewURLBuilder(root *url.URL) *URLBuilder { + return &URLBuilder{ + root: root, + router: Router(), } } -func newURLBuilderFromRequest(r *http.Request) *urlBuilder { - u := &url.URL{ - Scheme: r.URL.Scheme, - Host: r.Host, - } - - return newURLBuilder(u) -} - -func newURLBuilderFromString(root string) (*urlBuilder, error) { +// NewURLBuilderFromString workes identically to NewURLBuilder except it takes +// a string argument for the root, returning an error if it is not a valid +// url. +func NewURLBuilderFromString(root string) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } - return newURLBuilder(u), nil + return NewURLBuilder(u), nil } -func (ub *urlBuilder) buildBaseURL() (string, error) { - route := clonedRoute(ub.router, urls.RouteNameBase) +// NewURLBuilderFromRequest uses information from an *http.Request to +// construct the root url. +func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { + u := &url.URL{ + Scheme: r.URL.Scheme, + Host: r.Host, + } - baseURL, err := route. - Schemes(ub.url.Scheme). - Host(ub.url.Host). - URL() + return NewURLBuilder(u) +} + +// BuildBaseURL constructs a base url for the API, typically just "/v2/". +func (ub *URLBuilder) BuildBaseURL() (string, error) { + route := ub.cloneRoute(RouteNameBase) + + baseURL, err := route.URL() if err != nil { return "", err } @@ -54,13 +63,11 @@ func (ub *urlBuilder) buildBaseURL() (string, error) { return baseURL.String(), nil } -func (ub *urlBuilder) buildTagsURL(name string) (string, error) { - route := clonedRoute(ub.router, urls.RouteNameTags) +// BuildTagsURL constructs a url to list the tags in the named repository. +func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { + route := ub.cloneRoute(RouteNameTags) - tagsURL, err := route. - Schemes(ub.url.Scheme). - Host(ub.url.Host). - URL("name", name) + tagsURL, err := route.URL("name", name) if err != nil { return "", err } @@ -68,17 +75,11 @@ func (ub *urlBuilder) buildTagsURL(name string) (string, error) { return tagsURL.String(), nil } -func (ub *urlBuilder) forManifest(m *storage.Manifest) (string, error) { - return ub.buildManifestURL(m.Name, m.Tag) -} +// BuildManifestURL constructs a url for the manifest identified by name and tag. +func (ub *URLBuilder) BuildManifestURL(name, tag string) (string, error) { + route := ub.cloneRoute(RouteNameManifest) -func (ub *urlBuilder) buildManifestURL(name, tag string) (string, error) { - route := clonedRoute(ub.router, urls.RouteNameManifest) - - manifestURL, err := route. - Schemes(ub.url.Scheme). - Host(ub.url.Host). - URL("name", name, "tag", tag) + manifestURL, err := route.URL("name", name, "tag", tag) if err != nil { return "", err } @@ -86,17 +87,11 @@ func (ub *urlBuilder) buildManifestURL(name, tag string) (string, error) { return manifestURL.String(), nil } -func (ub *urlBuilder) forLayer(l storage.Layer) (string, error) { - return ub.buildLayerURL(l.Name(), l.Digest()) -} +// BuildLayerURL constructs the url for the blob identified by name and dgst. 
+func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { + route := ub.cloneRoute(RouteNameBlob) -func (ub *urlBuilder) buildLayerURL(name string, dgst digest.Digest) (string, error) { - route := clonedRoute(ub.router, urls.RouteNameBlob) - - layerURL, err := route. - Schemes(ub.url.Scheme). - Host(ub.url.Host). - URL("name", name, "digest", dgst.String()) + layerURL, err := route.URL("name", name, "digest", dgst.String()) if err != nil { return "", err } @@ -104,13 +99,12 @@ func (ub *urlBuilder) buildLayerURL(name string, dgst digest.Digest) (string, er return layerURL.String(), nil } -func (ub *urlBuilder) buildLayerUploadURL(name string) (string, error) { - route := clonedRoute(ub.router, urls.RouteNameBlobUpload) +// BuildBlobURL constructs a url to begin a blob upload in the repository +// identified by name. +func (ub *URLBuilder) BuildBlobUploadURL(name string) (string, error) { + route := ub.cloneRoute(RouteNameBlobUpload) - uploadURL, err := route. - Schemes(ub.url.Scheme). - Host(ub.url.Host). - URL("name", name) + uploadURL, err := route.URL("name", name) if err != nil { return "", err } @@ -118,17 +112,14 @@ func (ub *urlBuilder) buildLayerUploadURL(name string) (string, error) { return uploadURL.String(), nil } -func (ub *urlBuilder) forLayerUpload(layerUpload storage.LayerUpload) (string, error) { - return ub.buildLayerUploadResumeURL(layerUpload.Name(), layerUpload.UUID()) -} +// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, +// including any url values. This should generally not be used by clients, as +// this url is provided by server implementations during the blob upload +// process. +func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { + route := ub.cloneRoute(RouteNameBlobUploadChunk) -func (ub *urlBuilder) buildLayerUploadResumeURL(name, uuid string, values ...url.Values) (string, error) { - route := clonedRoute(ub.router, urls.RouteNameBlobUploadChunk) - - uploadURL, err := route. - Schemes(ub.url.Scheme). - Host(ub.url.Host). - URL("name", name, "uuid", uuid) + uploadURL, err := route.URL("name", name, "uuid", uuid) if err != nil { return "", err } @@ -136,6 +127,17 @@ func (ub *urlBuilder) buildLayerUploadResumeURL(name, uuid string, values ...url return appendValuesURL(uploadURL, values...).String(), nil } +// clondedRoute returns a clone of the named route from the router. Routes +// must be cloned to avoid modifying them during url generation. +func (ub *URLBuilder) cloneRoute(name string) *mux.Route { + route := new(mux.Route) + *route = *ub.router.GetRoute(name) // clone the route + + return route. + Schemes(ub.root.Scheme). + Host(ub.root.Host) +} + // appendValuesURL appends the parameters to the url. func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { merged := u.Query() @@ -161,10 +163,3 @@ func appendValues(u string, values ...url.Values) string { return appendValuesURL(up, values...).String() } - -// clondedRoute returns a clone of the named route from the router. 
-func clonedRoute(router *mux.Router, name string) *mux.Route { - route := new(mux.Route) - *route = *router.GetRoute(name) // clone the route - return route -} From 83f882b427e4e29f1dabededf1c309493788c84f Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 21:59:59 -0800 Subject: [PATCH 136/165] Migrate webapp to use exported urls.URLBuilder package --- api_test.go | 36 ++++++++++++++++++++++-------------- app.go | 2 +- context.go | 3 ++- layerupload.go | 4 ++-- 4 files changed, 27 insertions(+), 18 deletions(-) diff --git a/api_test.go b/api_test.go index d6cf34dd..e71edc17 100644 --- a/api_test.go +++ b/api_test.go @@ -14,6 +14,7 @@ import ( "testing" "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/urls" "github.com/docker/docker-registry/common/testutil" "github.com/docker/docker-registry/configuration" "github.com/docker/docker-registry/digest" @@ -34,13 +35,13 @@ func TestCheckAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := newURLBuilderFromString(server.URL) + builder, err := urls.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("error creating url builder: %v", err) } - baseURL, err := builder.buildBaseURL() + baseURL, err := builder.BuildBaseURL() if err != nil { t.Fatalf("unexpected error building base url: %v", err) } @@ -81,7 +82,7 @@ func TestLayerAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := newURLBuilderFromString(server.URL) + builder, err := urls.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("error creating url builder: %v", err) @@ -98,7 +99,7 @@ func TestLayerAPI(t *testing.T) { // ----------------------------------- // Test fetch for non-existent content - layerURL, err := builder.buildLayerURL(imageName, layerDigest) + layerURL, err := builder.BuildBlobURL(imageName, layerDigest) if err != nil { t.Fatalf("error building url: %v", err) } @@ -121,7 +122,7 @@ func TestLayerAPI(t *testing.T) { // ------------------------------------------ // Upload a layer - layerUploadURL, err := builder.buildLayerUploadURL(imageName) + layerUploadURL, err := builder.BuildBlobUploadURL(imageName) if err != nil { t.Fatalf("error building upload url: %v", err) } @@ -196,7 +197,7 @@ func TestManifestAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := newURLBuilderFromString(server.URL) + builder, err := urls.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("unexpected error creating url builder: %v", err) } @@ -204,7 +205,7 @@ func TestManifestAPI(t *testing.T) { imageName := "foo/bar" tag := "thetag" - manifestURL, err := builder.buildManifestURL(imageName, tag) + manifestURL, err := builder.BuildManifestURL(imageName, tag) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -240,7 +241,7 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("expected manifest unknown error: got %v", respErrs) } - tagsURL, err := builder.buildTagsURL(imageName) + tagsURL, err := builder.BuildTagsURL(imageName) if err != nil { t.Fatalf("unexpected error building tags url: %v", err) } @@ -427,8 +428,8 @@ func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { return resp } -func startPushLayer(t *testing.T, ub *urlBuilder, name string) string { - layerUploadURL, err := ub.buildLayerUploadURL(name) +func 
startPushLayer(t *testing.T, ub *urls.URLBuilder, name string) string { + layerUploadURL, err := ub.BuildBlobUploadURL(name) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) } @@ -449,14 +450,21 @@ func startPushLayer(t *testing.T, ub *urlBuilder, name string) string { } // pushLayer pushes the layer content returning the url on success. -func pushLayer(t *testing.T, ub *urlBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string { +func pushLayer(t *testing.T, ub *urls.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string { rsLength, _ := rs.Seek(0, os.SEEK_END) rs.Seek(0, os.SEEK_SET) - uploadURL := appendValues(uploadURLBase, url.Values{ + u, err := url.Parse(uploadURLBase) + if err != nil { + t.Fatalf("unexpected error parsing pushLayer url: %v", err) + } + + u.RawQuery = url.Values{ "digest": []string{dgst.String()}, "size": []string{fmt.Sprint(rsLength)}, - }) + }.Encode() + + uploadURL := u.String() // Just do a monolithic upload req, err := http.NewRequest("PUT", uploadURL, rs) @@ -472,7 +480,7 @@ func pushLayer(t *testing.T, ub *urlBuilder, name string, dgst digest.Digest, up checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - expectedLayerURL, err := ub.buildLayerURL(name, dgst) + expectedLayerURL, err := ub.BuildBlobURL(name, dgst) if err != nil { t.Fatalf("error building expected layer url: %v", err) } diff --git a/app.go b/app.go index b34a77de..e1b299a7 100644 --- a/app.go +++ b/app.go @@ -115,7 +115,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context := &Context{ App: app, Name: vars["name"], - urlBuilder: newURLBuilderFromRequest(r), + urlBuilder: urls.NewURLBuilderFromRequest(r), } // Store vars for underlying handlers. diff --git a/context.go b/context.go index a1e47abe..23d669b8 100644 --- a/context.go +++ b/context.go @@ -3,6 +3,7 @@ package registry import ( "github.com/Sirupsen/logrus" "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/urls" ) // Context should contain the request specific context for use in across @@ -28,5 +29,5 @@ type Context struct { // log provides a context specific logger. log *logrus.Entry - urlBuilder *urlBuilder + urlBuilder *urls.URLBuilder } diff --git a/layerupload.go b/layerupload.go index af8bd457..898b279b 100644 --- a/layerupload.go +++ b/layerupload.go @@ -151,7 +151,7 @@ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http. // chunk responses. This sets the correct headers but the response status is // left to the caller. 
func (luh *layerUploadHandler) layerUploadResponse(w http.ResponseWriter, r *http.Request) error { - uploadURL, err := luh.urlBuilder.forLayerUpload(luh.Upload) + uploadURL, err := luh.urlBuilder.BuildBlobUploadChunkURL(luh.Upload.Name(), luh.Upload.UUID()) if err != nil { logrus.Infof("error building upload url: %s", err) return err @@ -200,7 +200,7 @@ func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Req return } - layerURL, err := luh.urlBuilder.forLayer(layer) + layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest()) if err != nil { luh.Errors.Push(errors.ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) From e14e5d14b104cf0cd309c2ee516d3a049f4d31ab Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 22:02:17 -0800 Subject: [PATCH 137/165] Correct documentation errors in urls package --- api/urls/urls.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/urls/urls.go b/api/urls/urls.go index 7bfd6335..86306c67 100644 --- a/api/urls/urls.go +++ b/api/urls/urls.go @@ -87,7 +87,7 @@ func (ub *URLBuilder) BuildManifestURL(name, tag string) (string, error) { return manifestURL.String(), nil } -// BuildLayerURL constructs the url for the blob identified by name and dgst. +// BuildBlobURL constructs the url for the blob identified by name and dgst. func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { route := ub.cloneRoute(RouteNameBlob) @@ -99,8 +99,8 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err return layerURL.String(), nil } -// BuildBlobURL constructs a url to begin a blob upload in the repository -// identified by name. +// BuildBlobUploadURL constructs a url to begin a blob upload in the +// repository identified by name. func (ub *URLBuilder) BuildBlobUploadURL(name string) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) From 92dca269f0ccf4fe744ccc7a298d0093321fe113 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 22:04:26 -0800 Subject: [PATCH 138/165] Change errors export from Descriptors to ErrorDescriptors --- api/errors/descriptors.go | 12 ++++++------ api/errors/errors_test.go | 2 +- cmd/registry-api-doctable-gen/main.go | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/errors/descriptors.go b/api/errors/descriptors.go index 1d71162f..d2f0f7da 100644 --- a/api/errors/descriptors.go +++ b/api/errors/descriptors.go @@ -26,9 +26,9 @@ type ErrorDescriptor struct { HTTPStatusCodes []int } -// Descriptors provides a list of HTTP API Error codes that may be encountered -// when interacting with the registry API. -var Descriptors = []ErrorDescriptor{ +// ErrorDescriptors provides a list of HTTP API Error codes that may be +// encountered when interacting with the registry API. 
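+// The init function below indexes these descriptors by ErrorCode and by
+// string Value, giving constant-time lookup in both directions.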
+var ErrorDescriptors = []ErrorDescriptor{ { Code: ErrorCodeUnknown, Value: "UNKNOWN", @@ -131,10 +131,10 @@ var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor var idToDescriptors map[string]ErrorDescriptor func init() { - errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(Descriptors)) - idToDescriptors = make(map[string]ErrorDescriptor, len(Descriptors)) + errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(ErrorDescriptors)) + idToDescriptors = make(map[string]ErrorDescriptor, len(ErrorDescriptors)) - for _, descriptor := range Descriptors { + for _, descriptor := range ErrorDescriptors { errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor } diff --git a/api/errors/errors_test.go b/api/errors/errors_test.go index 7a68fe90..2f5c69e0 100644 --- a/api/errors/errors_test.go +++ b/api/errors/errors_test.go @@ -11,7 +11,7 @@ import ( // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { - for _, desc := range Descriptors { + for _, desc := range ErrorDescriptors { if desc.Code.String() != desc.Value { t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) } diff --git a/cmd/registry-api-doctable-gen/main.go b/cmd/registry-api-doctable-gen/main.go index f76c249e..869f1a37 100644 --- a/cmd/registry-api-doctable-gen/main.go +++ b/cmd/registry-api-doctable-gen/main.go @@ -61,7 +61,7 @@ func dumpErrors(wr io.Writer) { fmt.Fprintln(writer, "\n"+divider) - for _, descriptor := range errors.Descriptors { + for _, descriptor := range errors.ErrorDescriptors { fmt.Fprint(writer, "|") v := reflect.ValueOf(descriptor) From 5abfc91021077f522f2aa9f3b5f9b2ea3d903980 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 22:10:18 -0800 Subject: [PATCH 139/165] Merge errors and urls package into unified v2 package To provide a single package with v2 API definitions, the locked down portions of the API have been merged into a single package. References to exported items will appear as v2.XXX, indicating their origin. The definitions in this package will soon be locked down for change, acceppting only additions that will not change protocol behavior. --- api/{errors => v2}/descriptors.go | 2 +- api/v2/doc.go | 9 +++++++++ api/{errors => v2}/errors.go | 10 +--------- api/{errors => v2}/errors_test.go | 2 +- api/{urls => v2}/routes.go | 2 +- api/{urls => v2}/routes_test.go | 2 +- api/{urls => v2}/urls.go | 2 +- 7 files changed, 15 insertions(+), 14 deletions(-) rename api/{errors => v2}/descriptors.go (99%) create mode 100644 api/v2/doc.go rename api/{errors => v2}/errors.go (91%) rename api/{errors => v2}/errors_test.go (99%) rename api/{urls => v2}/routes.go (99%) rename api/{urls => v2}/routes_test.go (99%) rename api/{urls => v2}/urls.go (99%) diff --git a/api/errors/descriptors.go b/api/v2/descriptors.go similarity index 99% rename from api/errors/descriptors.go rename to api/v2/descriptors.go index d2f0f7da..77528d72 100644 --- a/api/errors/descriptors.go +++ b/api/v2/descriptors.go @@ -1,4 +1,4 @@ -package errors +package v2 import "net/http" diff --git a/api/v2/doc.go b/api/v2/doc.go new file mode 100644 index 00000000..cde01195 --- /dev/null +++ b/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. 
In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. +package v2 diff --git a/api/errors/errors.go b/api/v2/errors.go similarity index 91% rename from api/errors/errors.go rename to api/v2/errors.go index b6e64e2a..8c85d3a9 100644 --- a/api/errors/errors.go +++ b/api/v2/errors.go @@ -1,12 +1,4 @@ -// Package errors describes the error codes that may be returned via the -// Docker Registry JSON HTTP API V2. In addition to declaractions, -// descriptions about the error codes and the conditions causing them are -// avialable in detail. -// -// Error definitions here are considered to be locked down for the V2 registry -// api. Any changes must be considered carefully and should not proceed -// without a change proposal in docker core. -package errors +package v2 import ( "fmt" diff --git a/api/errors/errors_test.go b/api/v2/errors_test.go similarity index 99% rename from api/errors/errors_test.go rename to api/v2/errors_test.go index 2f5c69e0..d2fc091a 100644 --- a/api/errors/errors_test.go +++ b/api/v2/errors_test.go @@ -1,4 +1,4 @@ -package errors +package v2 import ( "encoding/json" diff --git a/api/urls/routes.go b/api/v2/routes.go similarity index 99% rename from api/urls/routes.go rename to api/v2/routes.go index 79138a4a..7ebe61d6 100644 --- a/api/urls/routes.go +++ b/api/v2/routes.go @@ -1,4 +1,4 @@ -package urls +package v2 import ( "github.com/docker/docker-registry/common" diff --git a/api/urls/routes_test.go b/api/v2/routes_test.go similarity index 99% rename from api/urls/routes_test.go rename to api/v2/routes_test.go index f2e95270..9969ebcc 100644 --- a/api/urls/routes_test.go +++ b/api/v2/routes_test.go @@ -1,4 +1,4 @@ -package urls +package v2 import ( "encoding/json" diff --git a/api/urls/urls.go b/api/v2/urls.go similarity index 99% rename from api/urls/urls.go rename to api/v2/urls.go index 86306c67..15d65484 100644 --- a/api/urls/urls.go +++ b/api/v2/urls.go @@ -1,4 +1,4 @@ -package urls +package v2 import ( "net/http" From d08f0edcf18c0a854c8bda6ac8e1ac41598b3639 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 11 Dec 2014 22:24:25 -0800 Subject: [PATCH 140/165] Migrate references to consolidated v2 package Routes and errors are now all referenced from a single v2 package. This packages exports are acceptable for use in the server side as well as integration into docker core. --- api/v2/descriptors.go | 3 +++ api_test.go | 25 ++++++++++++------------- app.go | 18 +++++++++--------- app_test.go | 16 ++++++++-------- client/client.go | 26 +++++++++++++------------- client/objectstore.go | 5 ++--- client/push.go | 7 +++---- cmd/registry-api-doctable-gen/main.go | 6 +++--- context.go | 7 +++---- images.go | 16 ++++++++-------- layer.go | 8 ++++---- layerupload.go | 24 ++++++++++++------------ tags.go | 4 ++-- 13 files changed, 82 insertions(+), 83 deletions(-) diff --git a/api/v2/descriptors.go b/api/v2/descriptors.go index 77528d72..68d18241 100644 --- a/api/v2/descriptors.go +++ b/api/v2/descriptors.go @@ -2,6 +2,9 @@ package v2 import "net/http" +// TODO(stevvooe): Add route descriptors for each named route, along with +// accepted methods, parameters, returned status codes and error codes. 
+ // ErrorDescriptor provides relevant information about a given error code. type ErrorDescriptor struct { // Code is the error code that this descriptor describes. diff --git a/api_test.go b/api_test.go index e71edc17..a650a102 100644 --- a/api_test.go +++ b/api_test.go @@ -13,8 +13,7 @@ import ( "os" "testing" - "github.com/docker/docker-registry/api/errors" - "github.com/docker/docker-registry/api/urls" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/common/testutil" "github.com/docker/docker-registry/configuration" "github.com/docker/docker-registry/digest" @@ -35,7 +34,7 @@ func TestCheckAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := urls.NewURLBuilderFromString(server.URL) + builder, err := v2.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("error creating url builder: %v", err) @@ -82,7 +81,7 @@ func TestLayerAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := urls.NewURLBuilderFromString(server.URL) + builder, err := v2.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("error creating url builder: %v", err) @@ -197,7 +196,7 @@ func TestManifestAPI(t *testing.T) { app := NewApp(config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := urls.NewURLBuilderFromString(server.URL) + builder, err := v2.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("unexpected error creating url builder: %v", err) } @@ -228,7 +227,7 @@ func TestManifestAPI(t *testing.T) { // } dec := json.NewDecoder(resp.Body) - var respErrs errors.Errors + var respErrs v2.Errors if err := dec.Decode(&respErrs); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } @@ -237,7 +236,7 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("expected errors in response") } - if respErrs.Errors[0].Code != errors.ErrorCodeManifestUnknown { + if respErrs.Errors[0].Code != v2.ErrorCodeManifestUnknown { t.Fatalf("expected manifest unknown error: got %v", respErrs) } @@ -263,7 +262,7 @@ func TestManifestAPI(t *testing.T) { t.Fatalf("expected errors in response") } - if respErrs.Errors[0].Code != errors.ErrorCodeNameUnknown { + if respErrs.Errors[0].Code != v2.ErrorCodeNameUnknown { t.Fatalf("expected respository unknown error: got %v", respErrs) } @@ -297,11 +296,11 @@ func TestManifestAPI(t *testing.T) { for _, err := range respErrs.Errors { switch err.Code { - case errors.ErrorCodeManifestUnverified: + case v2.ErrorCodeManifestUnverified: unverified++ - case errors.ErrorCodeBlobUnknown: + case v2.ErrorCodeBlobUnknown: missingLayers++ - case errors.ErrorCodeDigestInvalid: + case v2.ErrorCodeDigestInvalid: // TODO(stevvooe): This error isn't quite descriptive enough -- // the layer with an invalid digest isn't identified. invalidDigests++ @@ -428,7 +427,7 @@ func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { return resp } -func startPushLayer(t *testing.T, ub *urls.URLBuilder, name string) string { +func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) string { layerUploadURL, err := ub.BuildBlobUploadURL(name) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) @@ -450,7 +449,7 @@ func startPushLayer(t *testing.T, ub *urls.URLBuilder, name string) string { } // pushLayer pushes the layer content returning the url on success. 
-func pushLayer(t *testing.T, ub *urls.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string { +func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, rs io.ReadSeeker) string { rsLength, _ := rs.Seek(0, os.SEEK_END) rs.Seek(0, os.SEEK_SET) diff --git a/app.go b/app.go index e1b299a7..5a770c6c 100644 --- a/app.go +++ b/app.go @@ -4,7 +4,7 @@ import ( "fmt" "net/http" - "github.com/docker/docker-registry/api/urls" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/storagedriver" "github.com/docker/docker-registry/storagedriver/factory" @@ -36,18 +36,18 @@ type App struct { func NewApp(configuration configuration.Configuration) *App { app := &App{ Config: configuration, - router: urls.Router(), + router: v2.Router(), } // Register the handler dispatchers. - app.register(urls.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { + app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(apiBase) }) - app.register(urls.RouteNameManifest, imageManifestDispatcher) - app.register(urls.RouteNameTags, tagsDispatcher) - app.register(urls.RouteNameBlob, layerDispatcher) - app.register(urls.RouteNameBlobUpload, layerUploadDispatcher) - app.register(urls.RouteNameBlobUploadChunk, layerUploadDispatcher) + app.register(v2.RouteNameManifest, imageManifestDispatcher) + app.register(v2.RouteNameTags, tagsDispatcher) + app.register(v2.RouteNameBlob, layerDispatcher) + app.register(v2.RouteNameBlobUpload, layerUploadDispatcher) + app.register(v2.RouteNameBlobUploadChunk, layerUploadDispatcher) driver, err := factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) @@ -115,7 +115,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context := &Context{ App: app, Name: vars["name"], - urlBuilder: urls.NewURLBuilderFromRequest(r), + urlBuilder: v2.NewURLBuilderFromRequest(r), } // Store vars for underlying handlers. 
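For orientation, here is a rough sketch of what a consumer of the consolidated package looks like after this migration. The endpoint and repository name are placeholders; the exported identifiers (NewURLBuilderFromString, BuildManifestURL, Errors, ErrorCodeManifestUnknown) are the ones appearing in the hunks of this patch:

    package main

    import (
        "fmt"

        "github.com/docker/docker-registry/api/v2"
    )

    func main() {
        // URL construction and error codes now come from a single import.
        ub, err := v2.NewURLBuilderFromString("http://localhost:5000")
        if err != nil {
            panic(err)
        }

        manifestURL, err := ub.BuildManifestURL("foo/bar", "latest")
        if err != nil {
            panic(err)
        }
        fmt.Println(manifestURL) // http://localhost:5000/v2/foo/bar/manifests/latest

        // API errors accumulate on the same package's Errors type.
        var errs v2.Errors
        errs.Push(v2.ErrorCodeManifestUnknown)
        fmt.Println(len(errs.Errors))
    }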
diff --git a/app_test.go b/app_test.go index eb8ec597..f256c968 100644 --- a/app_test.go +++ b/app_test.go @@ -6,7 +6,7 @@ import ( "net/url" "testing" - "github.com/docker/docker-registry/api/urls" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/configuration" ) @@ -17,10 +17,10 @@ import ( func TestAppDispatcher(t *testing.T) { app := &App{ Config: configuration.Configuration{}, - router: urls.Router(), + router: v2.Router(), } server := httptest.NewServer(app) - router := urls.Router() + router := v2.Router() serverURL, err := url.Parse(server.URL) if err != nil { @@ -72,33 +72,33 @@ func TestAppDispatcher(t *testing.T) { vars []string }{ { - endpoint: urls.RouteNameManifest, + endpoint: v2.RouteNameManifest, vars: []string{ "name", "foo/bar", "tag", "sometag", }, }, { - endpoint: urls.RouteNameTags, + endpoint: v2.RouteNameTags, vars: []string{ "name", "foo/bar", }, }, { - endpoint: urls.RouteNameBlob, + endpoint: v2.RouteNameBlob, vars: []string{ "name", "foo/bar", "digest", "tarsum.v1+bogus:abcdef0123456789", }, }, { - endpoint: urls.RouteNameBlobUpload, + endpoint: v2.RouteNameBlobUpload, vars: []string{ "name", "foo/bar", }, }, { - endpoint: urls.RouteNameBlobUploadChunk, + endpoint: v2.RouteNameBlobUploadChunk, vars: []string{ "name", "foo/bar", "uuid", "theuuid", diff --git a/client/client.go b/client/client.go index 8f31cb4e..e25fbff4 100644 --- a/client/client.go +++ b/client/client.go @@ -10,7 +10,7 @@ import ( "regexp" "strconv" - "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" ) @@ -96,7 +96,7 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest case response.StatusCode == http.StatusNotFound: return nil, &ImageManifestNotFoundError{Name: name, Tag: tag} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) @@ -136,7 +136,7 @@ func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.Signed case response.StatusCode == http.StatusOK: return nil case response.StatusCode >= 400 && response.StatusCode < 500: - var errors errors.Errors + var errors v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errors) if err != nil { @@ -169,7 +169,7 @@ func (r *clientImpl) DeleteImage(name, tag string) error { case response.StatusCode == http.StatusNotFound: return &ImageManifestNotFoundError{Name: name, Tag: tag} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -197,7 +197,7 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) { case response.StatusCode == http.StatusNotFound: return nil, &RepositoryNotFoundError{Name: name} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -240,7 +240,7 @@ func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { case response.StatusCode == http.StatusNotFound: return -1, nil case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -279,7 
+279,7 @@ func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (i response.Body.Close() return nil, 0, &BlobNotFoundError{Name: name, Digest: dgst} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -312,7 +312,7 @@ func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { // case response.StatusCode == http.StatusNotFound: // return case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -338,7 +338,7 @@ func (r *clientImpl) GetBlobUploadStatus(location string) (int, int, error) { case response.StatusCode == http.StatusNotFound: return 0, 0, &BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -379,7 +379,7 @@ func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, case response.StatusCode == http.StatusNotFound: return &BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -430,7 +430,7 @@ func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, l case response.StatusCode == http.StatusNotFound: return &BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -472,7 +472,7 @@ func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst d case response.StatusCode == http.StatusNotFound: return &BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { @@ -504,7 +504,7 @@ func (r *clientImpl) CancelBlobUpload(location string) error { case response.StatusCode == http.StatusNotFound: return &BlobUploadNotFoundError{Location: location} case response.StatusCode >= 400 && response.StatusCode < 500: - var errs errors.Errors + var errs v2.Errors decoder := json.NewDecoder(response.Body) err = decoder.Decode(&errs) if err != nil { diff --git a/client/objectstore.go b/client/objectstore.go index 06fba3d8..55ab20a5 100644 --- a/client/objectstore.go +++ b/client/objectstore.go @@ -2,7 +2,6 @@ package client import ( "bytes" - "errors" "fmt" "io" "sync" @@ -14,11 +13,11 @@ import ( var ( // ErrLayerAlreadyExists is returned when attempting to create a layer with // a tarsum that is already in use. - ErrLayerAlreadyExists = errors.New("Layer already exists") + ErrLayerAlreadyExists = fmt.Errorf("Layer already exists") // ErrLayerLocked is returned when attempting to write to a layer which is // currently being written to. 
- ErrLayerLocked = errors.New("Layer locked") + ErrLayerLocked = fmt.Errorf("Layer locked") ) // ObjectStore is an interface which is designed to approximate the docker diff --git a/client/push.go b/client/push.go index 61853b53..aac3fc40 100644 --- a/client/push.go +++ b/client/push.go @@ -1,11 +1,10 @@ package client import ( - "errors" - - "github.com/docker/docker-registry/storage" + "fmt" log "github.com/Sirupsen/logrus" + "github.com/docker/docker-registry/storage" ) // simultaneousLayerPushWindow is the size of the parallel layer push window. @@ -100,7 +99,7 @@ func pushLayer(c Client, objectStore ObjectStore, name string, fsLayer storage.F "currentSize": layerReader.CurrentSize(), "size": layerReader.Size(), }).Warn("Local layer incomplete") - return errors.New("Local layer incomplete") + return fmt.Errorf("Local layer incomplete") } length, err := c.BlobLength(name, fsLayer.BlobSum) diff --git a/cmd/registry-api-doctable-gen/main.go b/cmd/registry-api-doctable-gen/main.go index 869f1a37..a9e71fff 100644 --- a/cmd/registry-api-doctable-gen/main.go +++ b/cmd/registry-api-doctable-gen/main.go @@ -17,7 +17,7 @@ import ( "strings" "text/tabwriter" - "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/v2" ) func main() { @@ -40,7 +40,7 @@ func dumpErrors(wr io.Writer) { defer writer.Flush() fmt.Fprint(writer, "|") - dtype := reflect.TypeOf(errors.ErrorDescriptor{}) + dtype := reflect.TypeOf(v2.ErrorDescriptor{}) var fieldsPrinted int for i := 0; i < dtype.NumField(); i++ { field := dtype.Field(i) @@ -61,7 +61,7 @@ func dumpErrors(wr io.Writer) { fmt.Fprintln(writer, "\n"+divider) - for _, descriptor := range errors.ErrorDescriptors { + for _, descriptor := range v2.ErrorDescriptors { fmt.Fprint(writer, "|") v := reflect.ValueOf(descriptor) diff --git a/context.go b/context.go index 23d669b8..0c5ba587 100644 --- a/context.go +++ b/context.go @@ -2,8 +2,7 @@ package registry import ( "github.com/Sirupsen/logrus" - "github.com/docker/docker-registry/api/errors" - "github.com/docker/docker-registry/api/urls" + "github.com/docker/docker-registry/api/v2" ) // Context should contain the request specific context for use in across @@ -20,7 +19,7 @@ type Context struct { // Errors is a collection of errors encountered during the request to be // returned to the client API. If errors are added to the collection, the // handler *must not* start the response via http.ResponseWriter. - Errors errors.Errors + Errors v2.Errors // vars contains the extracted gorilla/mux variables that can be used for // assignment. @@ -29,5 +28,5 @@ type Context struct { // log provides a context specific logger. 
log *logrus.Entry - urlBuilder *urls.URLBuilder + urlBuilder *v2.URLBuilder } diff --git a/images.go b/images.go index 74ae067e..5a373f1f 100644 --- a/images.go +++ b/images.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" - "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" @@ -41,7 +41,7 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http manifest, err := manifests.Get(imh.Name, imh.Tag) if err != nil { - imh.Errors.Push(errors.ErrorCodeManifestUnknown, err) + imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) w.WriteHeader(http.StatusNotFound) return } @@ -58,7 +58,7 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http var manifest storage.SignedManifest if err := dec.Decode(&manifest); err != nil { - imh.Errors.Push(errors.ErrorCodeManifestInvalid, err) + imh.Errors.Push(v2.ErrorCodeManifestInvalid, err) w.WriteHeader(http.StatusBadRequest) return } @@ -71,14 +71,14 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http for _, verificationError := range err { switch verificationError := verificationError.(type) { case storage.ErrUnknownLayer: - imh.Errors.Push(errors.ErrorCodeBlobUnknown, verificationError.FSLayer) + imh.Errors.Push(v2.ErrorCodeBlobUnknown, verificationError.FSLayer) case storage.ErrManifestUnverified: - imh.Errors.Push(errors.ErrorCodeManifestUnverified) + imh.Errors.Push(v2.ErrorCodeManifestUnverified) default: if verificationError == digest.ErrDigestInvalidFormat { // TODO(stevvooe): We need to really need to move all // errors to types. Its much more straightforward. - imh.Errors.Push(errors.ErrorCodeDigestInvalid) + imh.Errors.Push(v2.ErrorCodeDigestInvalid) } else { imh.Errors.PushErr(verificationError) } @@ -99,10 +99,10 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h if err := manifests.Delete(imh.Name, imh.Tag); err != nil { switch err := err.(type) { case storage.ErrUnknownManifest: - imh.Errors.Push(errors.ErrorCodeManifestUnknown, err) + imh.Errors.Push(v2.ErrorCodeManifestUnknown, err) w.WriteHeader(http.StatusNotFound) default: - imh.Errors.Push(errors.ErrorCodeUnknown, err) + imh.Errors.Push(v2.ErrorCodeUnknown, err) w.WriteHeader(http.StatusBadRequest) } return diff --git a/layer.go b/layer.go index 4da7723a..094a54cf 100644 --- a/layer.go +++ b/layer.go @@ -3,7 +3,7 @@ package registry import ( "net/http" - "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" @@ -15,7 +15,7 @@ func layerDispatcher(ctx *Context, r *http.Request) http.Handler { if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx.Errors.Push(errors.ErrorCodeDigestInvalid, err) + ctx.Errors.Push(v2.ErrorCodeDigestInvalid, err) }) } @@ -50,9 +50,9 @@ func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) { switch err := err.(type) { case storage.ErrUnknownLayer: w.WriteHeader(http.StatusNotFound) - lh.Errors.Push(errors.ErrorCodeBlobUnknown, err.FSLayer) + lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer) default: - lh.Errors.Push(errors.ErrorCodeUnknown, err) + lh.Errors.Push(v2.ErrorCodeUnknown, err) } return } diff --git a/layerupload.go b/layerupload.go index 898b279b..b65c8ef2 100644 --- 
a/layerupload.go +++ b/layerupload.go @@ -7,7 +7,7 @@ import ( "strconv" "github.com/Sirupsen/logrus" - "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/digest" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" @@ -39,7 +39,7 @@ func layerUploadDispatcher(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logrus.Infof("error resolving upload: %v", err) w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) }) } @@ -67,7 +67,7 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R upload, err := layers.Upload(luh.Name) if err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) return } @@ -76,7 +76,7 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R if err := luh.layerUploadResponse(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) return } w.WriteHeader(http.StatusAccepted) @@ -86,12 +86,12 @@ func (luh *layerUploadHandler) StartLayerUpload(w http.ResponseWriter, r *http.R func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(errors.ErrorCodeBlobUploadUnknown) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) } if err := luh.layerUploadResponse(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) return } @@ -103,7 +103,7 @@ func (luh *layerUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Re func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(errors.ErrorCodeBlobUploadUnknown) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) } var finished bool @@ -120,14 +120,14 @@ func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Requ if err := luh.maybeCompleteUpload(w, r); err != nil { if err != errNotReadyToComplete { w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) return } } if err := luh.layerUploadResponse(w, r); err != nil { w.WriteHeader(http.StatusInternalServerError) // Error conditions here? 
- luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) return } @@ -142,7 +142,7 @@ func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Requ func (luh *layerUploadHandler) CancelLayerUpload(w http.ResponseWriter, r *http.Request) { if luh.Upload == nil { w.WriteHeader(http.StatusNotFound) - luh.Errors.Push(errors.ErrorCodeBlobUploadUnknown) + luh.Errors.Push(v2.ErrorCodeBlobUploadUnknown) } } @@ -195,14 +195,14 @@ func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *htt func (luh *layerUploadHandler) completeUpload(w http.ResponseWriter, r *http.Request, size int64, dgst digest.Digest) { layer, err := luh.Upload.Finish(size, dgst) if err != nil { - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) return } layerURL, err := luh.urlBuilder.BuildBlobURL(layer.Name(), layer.Digest()) if err != nil { - luh.Errors.Push(errors.ErrorCodeUnknown, err) + luh.Errors.Push(v2.ErrorCodeUnknown, err) w.WriteHeader(http.StatusInternalServerError) return } diff --git a/tags.go b/tags.go index 12a5062f..04d994b9 100644 --- a/tags.go +++ b/tags.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net/http" - "github.com/docker/docker-registry/api/errors" + "github.com/docker/docker-registry/api/v2" "github.com/docker/docker-registry/storage" "github.com/gorilla/handlers" ) @@ -40,7 +40,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { switch err := err.(type) { case storage.ErrUnknownRepository: w.WriteHeader(404) - th.Errors.Push(errors.ErrorCodeNameUnknown, map[string]string{"name": th.Name}) + th.Errors.Push(v2.ErrorCodeNameUnknown, map[string]string{"name": th.Name}) default: th.Errors.PushErr(err) } From bb300231d04ec749818a5640b14d03790390c1ac Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 12 Dec 2014 13:55:14 -0800 Subject: [PATCH 141/165] Allow variadic url values for BuildBlobUploadURL URL values may be required to complete an upload in a single POST request, which may include digest and size. This is not implemented in the server side, yet, but is part of the HTTP API specification. --- api/v2/urls.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/v2/urls.go b/api/v2/urls.go index 15d65484..72f44299 100644 --- a/api/v2/urls.go +++ b/api/v2/urls.go @@ -101,7 +101,7 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. 
-func (ub *URLBuilder) BuildBlobUploadURL(name string) (string, error) { +func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) uploadURL, err := route.URL("name", name) @@ -109,7 +109,7 @@ func (ub *URLBuilder) BuildBlobUploadURL(name string) (string, error) { return "", err } - return uploadURL.String(), nil + return appendValuesURL(uploadURL, values...).String(), nil } // BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, From 39169384810393a00c40ed4dfaf4fa4fe4ce3d08 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 12 Dec 2014 15:48:41 -0800 Subject: [PATCH 142/165] Add tests for URLBuilder --- api/v2/urls_test.go | 100 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 api/v2/urls_test.go diff --git a/api/v2/urls_test.go b/api/v2/urls_test.go new file mode 100644 index 00000000..a9590dba --- /dev/null +++ b/api/v2/urls_test.go @@ -0,0 +1,100 @@ +package v2 + +import ( + "net/url" + "testing" +) + +type urlBuilderTestCase struct { + description string + expected string + build func() (string, error) +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. +func TestURLBuilder(t *testing.T) { + + root := "http://localhost:5000/" + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testcase := range []struct { + description string + expected string + build func() (string, error) + }{ + { + description: "test base url", + expected: "http://localhost:5000/v2/", + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expected: "http://localhost:5000/v2/foo/bar/tags/list", + build: func() (string, error) { + return urlBuilder.BuildTagsURL("foo/bar") + }, + }, + { + description: "test manifest url", + expected: "http://localhost:5000/v2/foo/bar/manifests/tag", + build: func() (string, error) { + return urlBuilder.BuildManifestURL("foo/bar", "tag") + }, + }, + { + description: "build blob url", + expected: "http://localhost:5000/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + build: func() (string, error) { + return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + }, + }, + { + description: "build blob upload url", + expected: "http://localhost:5000/v2/foo/bar/blobs/uploads/", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar") + }, + }, + { + description: "build blob upload url with digest and size", + expected: "http://localhost:5000/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expected: "http://localhost:5000/v2/foo/bar/blobs/uploads/uuid-part", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expected: "http://localhost:5000/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": 
[]string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + } { + u, err := testcase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testcase.description, err) + } + + if u != testcase.expected { + t.Fatalf("%s: %q != %q", testcase.description, u, testcase.expected) + } + } + +} From 43a50b5e82a64a9e15af8cebe7b22770d94994a2 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Fri, 12 Dec 2014 17:49:06 -0800 Subject: [PATCH 143/165] Adds bugsnag and newrelic metrics and error reporting Configuration variables are added under the REGISTRY_REPORTING namespace, for example REGISTRY_REPORTING_BUGSNAG_APIKEY --- cmd/registry/main.go | 42 +++++++++++++++++++++++++++++++++- configuration/configuration.go | 41 +++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 1 deletion(-) diff --git a/cmd/registry/main.go b/cmd/registry/main.go index 29fa24c1..ba859eec 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -11,6 +11,9 @@ import ( log "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" + "github.com/yvasiyarov/gorelic" + "github.com/docker/docker-registry" "github.com/docker/docker-registry/configuration" _ "github.com/docker/docker-registry/storagedriver/filesystem" @@ -27,7 +30,8 @@ func main() { } app := registry.NewApp(*config) - handler := handlers.CombinedLoggingHandler(os.Stdout, app) + handler := configureReporting(app) + handler = handlers.CombinedLoggingHandler(os.Stdout, handler) log.SetLevel(logLevel(config.Loglevel)) log.Infof("listening on %v", config.HTTP.Addr) @@ -82,3 +86,39 @@ func logLevel(level configuration.Loglevel) log.Level { return l } + +func configureReporting(app *registry.App) http.Handler { + var handler http.Handler = app + + if app.Config.Reporting.Bugsnag.APIKey != "" { + bugsnagConfig := bugsnag.Configuration{ + APIKey: app.Config.Reporting.Bugsnag.APIKey, + // TODO(brianbland): provide the registry version here + // AppVersion: "2.0", + } + if app.Config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage + } + if app.Config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + handler = bugsnag.Handler(handler) + } + + if app.Config.Reporting.NewRelic.LicenseKey != "" { + agent := gorelic.NewAgent() + agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey + if app.Config.Reporting.NewRelic.Name != "" { + agent.NewrelicName = app.Config.Reporting.NewRelic.Name + } + agent.CollectHTTPStat = true + agent.Verbose = true + agent.Run() + + handler = agent.WrapHTTPHandler(handler) + } + + return handler +} diff --git a/configuration/configuration.go b/configuration/configuration.go index 2d7e476b..96d664f1 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -24,6 +24,9 @@ type Configuration struct { // Storage is the configuration for the registry's storage driver Storage Storage `yaml:"storage"` + // Reporting is the configuration for error reporting + Reporting Reporting `yaml:"reporting"` + // HTTP contains configuration parameters for the registry's http // interface. HTTP struct { @@ -180,6 +183,27 @@ func (storage Storage) MarshalYAML() (interface{}, error) { // Parameters defines a key-value parameters mapping type Parameters map[string]string +// Reporting defines error reporting methods. +type Reporting struct { + // Bugsnag configures error reporting for Bugsnag (bugsnag.com). 
+ Bugsnag struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey"` + // ReleaseStage tracks where the registry is deployed. + // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint"` + } `yaml:"bugsnag"` + // NewRelic configures error reporting for NewRelic (newrelic.com) + NewRelic struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey"` + // AppName is the component name of the registry in NewRelic + Name string `yaml:"name"` + } `yaml:"newrelic"` +} + // Parse parses an input configuration yaml document into a Configuration struct // This should generally be capable of handling old configuration format versions // @@ -264,6 +288,23 @@ func parseV0_1Registry(in []byte) (*Configuration, error) { } } + if bugsnagAPIKey, ok := envMap["REGISTRY_REPORTING_BUGSNAG_APIKEY"]; ok { + config.Reporting.Bugsnag.APIKey = bugsnagAPIKey + } + if bugsnagReleaseStage, ok := envMap["REGISTRY_REPORTING_BUGSNAG_RELEASESTAGE"]; ok { + config.Reporting.Bugsnag.ReleaseStage = bugsnagReleaseStage + } + if bugsnagEndpoint, ok := envMap["REGISTRY_REPORTING_BUGSNAG_ENDPOINT"]; ok { + config.Reporting.Bugsnag.Endpoint = bugsnagEndpoint + } + + if newRelicLicenseKey, ok := envMap["REGISTRY_REPORTING_NEWRELIC_LICENSEKEY"]; ok { + config.Reporting.NewRelic.LicenseKey = newRelicLicenseKey + } + if newRelicName, ok := envMap["REGISTRY_REPORTING_NEWRELIC_NAME"]; ok { + config.Reporting.NewRelic.Name = newRelicName + } + return (*Configuration)(&config), nil } From cd057fd120d21fcba9a529def8ce8791d55f0dd3 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Fri, 12 Dec 2014 18:05:37 -0800 Subject: [PATCH 144/165] Updates configuration tests for bugsnag and newrelic params --- configuration/configuration.go | 36 +++++++++++++++++------------ configuration/configuration_test.go | 29 +++++++++++++++++++++++ 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index 96d664f1..ab962a90 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -186,22 +186,28 @@ type Parameters map[string]string // Reporting defines error reporting methods. type Reporting struct { // Bugsnag configures error reporting for Bugsnag (bugsnag.com). - Bugsnag struct { - // APIKey is the Bugsnag api key. - APIKey string `yaml:"apikey"` - // ReleaseStage tracks where the registry is deployed. - // Examples: production, staging, development - ReleaseStage string `yaml:"releasestage"` - // Endpoint is used for specifying an enterprise Bugsnag endpoint. - Endpoint string `yaml:"endpoint"` - } `yaml:"bugsnag"` + Bugsnag BugsnagReporting `yaml:"bugsnag"` // NewRelic configures error reporting for NewRelic (newrelic.com) - NewRelic struct { - // LicenseKey is the NewRelic user license key - LicenseKey string `yaml:"licensekey"` - // AppName is the component name of the registry in NewRelic - Name string `yaml:"name"` - } `yaml:"newrelic"` + NewRelic NewRelicReporting `yaml:"newrelic"` +} + +// BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). +type BugsnagReporting struct { + // APIKey is the Bugsnag api key. + APIKey string `yaml:"apikey"` + // ReleaseStage tracks where the registry is deployed. 
+ // Examples: production, staging, development + ReleaseStage string `yaml:"releasestage"` + // Endpoint is used for specifying an enterprise Bugsnag endpoint. + Endpoint string `yaml:"endpoint"` +} + +// NewRelicReporting configures error reporting for NewRelic (newrelic.com) +type NewRelicReporting struct { + // LicenseKey is the NewRelic user license key + LicenseKey string `yaml:"licensekey"` + // AppName is the component name of the registry in NewRelic + Name string `yaml:"name"` } // Parse parses an input configuration yaml document into a Configuration struct diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 0e227653..5c9ec9e7 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -29,6 +29,11 @@ var configStruct = Configuration{ "port": "", }, }, + Reporting: Reporting{ + Bugsnag: BugsnagReporting{ + APIKey: "BugsnagApiKey", + }, + }, } // configYamlV0_1 is a Version 0.1 yaml document representing configStruct @@ -46,6 +51,9 @@ storage: secretkey: SUPERSECRET host: ~ port: ~ +reporting: + bugsnag: + apikey: BugsnagApiKey ` // inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory storage driver with @@ -88,6 +96,7 @@ func (suite *ConfigSuite) TestParseSimple(c *C) { // parsed into a Configuration struct with no storage parameters func (suite *ConfigSuite) TestParseInmemory(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} + suite.expectedConfig.Reporting = Reporting{} config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) c.Assert(err, IsNil) @@ -171,6 +180,22 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { + suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" + suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" + suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" + suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" + + os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") + os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") + os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") + + config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + // TestParseInvalidVersion validates that the parser will fail to parse a newer configuration // version than the CurrentVersion func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { @@ -190,6 +215,10 @@ func copyConfig(config Configuration) *Configuration { for k, v := range config.Storage.Parameters() { configCopy.Storage.setParameter(k, v) } + configCopy.Reporting = Reporting{ + Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, + NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name}, + } return configCopy } From a4f42b8eea56d74920c73e74107b195e47b3bfc7 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 12 Dec 2014 17:43:30 -0800 Subject: [PATCH 145/165] Relax requirement for size argument during blob upload During client implementation, it was found that requiring the size argument made client implementation more complex. 
The original benefit of the size argument was to provide an additional check alongside of tarsum to validate incoming data. For the purposes of the registry, it has been determined that tarsum should be enough to validate incoming content. At this time, the size check is optional but we may consider removing it completely. --- api_test.go | 5 ++++- layerupload.go | 30 +++++++++++++++++++++++------- storage/layer.go | 19 +++++++++++++++---- storage/layerupload.go | 19 ++++++++++++------- 4 files changed, 54 insertions(+), 19 deletions(-) diff --git a/api_test.go b/api_test.go index a650a102..6cf56344 100644 --- a/api_test.go +++ b/api_test.go @@ -460,7 +460,10 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, u.RawQuery = url.Values{ "digest": []string{dgst.String()}, - "size": []string{fmt.Sprint(rsLength)}, + + // TODO(stevvooe): Layer upload can be completed with and without size + // argument. We'll need to add a test that checks the latter path. + "size": []string{fmt.Sprint(rsLength)}, }.Encode() uploadURL := u.String() diff --git a/layerupload.go b/layerupload.go index b65c8ef2..91c3b5cc 100644 --- a/layerupload.go +++ b/layerupload.go @@ -119,9 +119,20 @@ func (luh *layerUploadHandler) PutLayerChunk(w http.ResponseWriter, r *http.Requ if err := luh.maybeCompleteUpload(w, r); err != nil { if err != errNotReadyToComplete { - w.WriteHeader(http.StatusInternalServerError) - luh.Errors.Push(v2.ErrorCodeUnknown, err) - return + switch err := err.(type) { + case storage.ErrLayerInvalidSize: + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeSizeInvalid, err) + return + case storage.ErrLayerInvalidDigest: + w.WriteHeader(http.StatusBadRequest) + luh.Errors.Push(v2.ErrorCodeDigestInvalid, err) + return + default: + w.WriteHeader(http.StatusInternalServerError) + luh.Errors.Push(v2.ErrorCodeUnknown, err) + return + } } } @@ -173,7 +184,7 @@ func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *htt dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! sizeStr := r.FormValue("size") - if dgstStr == "" || sizeStr == "" { + if dgstStr == "" { return errNotReadyToComplete } @@ -182,9 +193,14 @@ func (luh *layerUploadHandler) maybeCompleteUpload(w http.ResponseWriter, r *htt return err } - size, err := strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - return err + var size int64 + if sizeStr != "" { + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return err + } + } else { + size = -1 } luh.completeUpload(w, r, size, dgst) diff --git a/storage/layer.go b/storage/layer.go index 2ad91314..84f83d2d 100644 --- a/storage/layer.go +++ b/storage/layer.go @@ -43,9 +43,14 @@ type LayerUpload interface { // Offset returns the position of the last byte written to this layer. Offset() int64 + // TODO(stevvooe): Consider completely removing the size check from this + // interface. The digest check may be adequate and we are making it + // optional in the HTTP API. + // Finish marks the upload as completed, returning a valid handle to the // uploaded layer. The final size and digest are validated against the - // contents of the uploaded layer. + // contents of the uploaded layer. If the size is negative, only the + // digest will be checked. Finish(size int64, digest digest.Digest) (Layer, error) // Cancel the layer upload process. @@ -62,9 +67,6 @@ var ( // ErrLayerUploadUnknown returned when upload is not found. 
 	ErrLayerUploadUnknown = fmt.Errorf("layer upload unknown")
 
-	// ErrLayerInvalidLength returned when length check fails.
-	ErrLayerInvalidLength = fmt.Errorf("invalid layer length")
-
 	// ErrLayerClosed returned when an operation is attempted on a closed
 	// Layer or LayerUpload.
 	ErrLayerClosed = fmt.Errorf("layer closed")
@@ -87,3 +89,12 @@ type ErrLayerInvalidDigest struct {
 func (err ErrLayerInvalidDigest) Error() string {
 	return fmt.Sprintf("invalid digest for referenced layer: %v", err.FSLayer.BlobSum)
 }
+
+// ErrLayerInvalidSize returned when length check fails.
+type ErrLayerInvalidSize struct {
+	Size int64
+}
+
+func (err ErrLayerInvalidSize) Error() string {
+	return fmt.Sprintf("invalid layer size: %d", err.Size)
+}
diff --git a/storage/layerupload.go b/storage/layerupload.go
index 3ee593b9..63ae5d6f 100644
--- a/storage/layerupload.go
+++ b/storage/layerupload.go
@@ -110,7 +110,7 @@ func (luc *layerUploadController) Finish(size int64, digest digest.Digest) (Laye
 	if nn, err := luc.writeLayer(fp, digest); err != nil {
 		// Cleanup?
 		return nil, err
-	} else if nn != size {
+	} else if size >= 0 && nn != size {
 		// TODO(stevvooe): Short write. Will have to delete the location and
 		// report an error. This error needs to be reported to the client.
 		return nil, fmt.Errorf("short write writing layer")
@@ -252,9 +252,10 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d
 		return "", err
 	}
 
-	if end != size {
+	// Only check size if it is greater than
+	if size >= 0 && end != size {
 		// Fast path length check.
-		return "", ErrLayerInvalidLength
+		return "", ErrLayerInvalidSize{Size: size}
 	}
 
 	// Now seek back to start and take care of the digest.
@@ -262,8 +263,12 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d
 		return "", err
 	}
 
-	tr := io.TeeReader(fp, lengthVerifier)
-	tr = io.TeeReader(tr, digestVerifier)
+	tr := io.TeeReader(fp, digestVerifier)
+
+	// Only verify the size if a positive size argument has been passed.
+	if size >= 0 {
+		tr = io.TeeReader(tr, lengthVerifier)
+	}
 
 	// TODO(stevvooe): This is one of the places we need a Digester write
 	// sink. Instead, its read driven. This migth be okay.
@@ -274,8 +279,8 @@ func (luc *layerUploadController) validateLayer(fp layerFile, size int64, dgst d
 		return "", err
 	}
 
-	if !lengthVerifier.Verified() {
-		return "", ErrLayerInvalidLength
+	if size >= 0 && !lengthVerifier.Verified() {
+		return "", ErrLayerInvalidSize{Size: size}
 	}
 
 	if !digestVerifier.Verified() {

From fc7b47cdae56a9a29d29eca24f6e1aefc1c554ac Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 12 Dec 2014 19:09:26 -0800
Subject: [PATCH 146/165] Port client to use URLBuilder to create urls

This change ports the client to use the URLBuilder to create urls.
Without this, it produces broken urls for certain use cases. The client
has also been updated to no longer use the size argument to complete
blob uploads.

Much of this work has been done after testing with the staging registry
instance.
---
 client/client.go      | 85 +++++++++++++++++++++++++++++--------------
 client/client_test.go | 28 +++++++++-----
 2 files changed, 75 insertions(+), 38 deletions(-)

diff --git a/client/client.go b/client/client.go
index e25fbff4..6616e54d 100644
--- a/client/client.go
+++ b/client/client.go
@@ -71,19 +71,33 @@ type Client interface {
 // New returns a new Client which operates against a registry with the
 // given base endpoint
 // This endpoint should not include /v2/ or any part of the url after this.
-func New(endpoint string) Client { - return &clientImpl{endpoint} +func New(endpoint string) (Client, error) { + ub, err := v2.NewURLBuilderFromString(endpoint) + if err != nil { + return nil, err + } + + return &clientImpl{ + endpoint: endpoint, + ub: ub, + }, nil } // clientImpl is the default implementation of the Client interface type clientImpl struct { - Endpoint string + endpoint string + ub *v2.URLBuilder } // TODO(bbland): use consistent route generation between server and client func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest, error) { - response, err := http.Get(r.imageManifestURL(name, tag)) + manifestURL, err := r.ub.BuildManifestURL(name, tag) + if err != nil { + return nil, err + } + + response, err := http.Get(manifestURL) if err != nil { return nil, err } @@ -119,8 +133,12 @@ func (r *clientImpl) GetImageManifest(name, tag string) (*storage.SignedManifest } func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.SignedManifest) error { - putRequest, err := http.NewRequest("PUT", - r.imageManifestURL(name, tag), bytes.NewReader(manifest.Raw)) + manifestURL, err := r.ub.BuildManifestURL(name, tag) + if err != nil { + return err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(manifest.Raw)) if err != nil { return err } @@ -150,8 +168,12 @@ func (r *clientImpl) PutImageManifest(name, tag string, manifest *storage.Signed } func (r *clientImpl) DeleteImage(name, tag string) error { - deleteRequest, err := http.NewRequest("DELETE", - r.imageManifestURL(name, tag), nil) + manifestURL, err := r.ub.BuildManifestURL(name, tag) + if err != nil { + return err + } + + deleteRequest, err := http.NewRequest("DELETE", manifestURL, nil) if err != nil { return err } @@ -184,7 +206,12 @@ func (r *clientImpl) DeleteImage(name, tag string) error { } func (r *clientImpl) ListImageTags(name string) ([]string, error) { - response, err := http.Get(fmt.Sprintf("%s/v2/%s/tags/list", r.Endpoint, name)) + tagsURL, err := r.ub.BuildTagsURL(name) + if err != nil { + return nil, err + } + + response, err := http.Get(tagsURL) if err != nil { return nil, err } @@ -222,7 +249,12 @@ func (r *clientImpl) ListImageTags(name string) ([]string, error) { } func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { - response, err := http.Head(fmt.Sprintf("%s/v2/%s/blobs/%s", r.Endpoint, name, dgst)) + blobURL, err := r.ub.BuildBlobURL(name, dgst) + if err != nil { + return -1, err + } + + response, err := http.Head(blobURL) if err != nil { return -1, err } @@ -254,8 +286,12 @@ func (r *clientImpl) BlobLength(name string, dgst digest.Digest) (int, error) { } func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (io.ReadCloser, int, error) { - getRequest, err := http.NewRequest("GET", - fmt.Sprintf("%s/v2/%s/blobs/%s", r.Endpoint, name, dgst), nil) + blobURL, err := r.ub.BuildBlobURL(name, dgst) + if err != nil { + return nil, 0, err + } + + getRequest, err := http.NewRequest("GET", blobURL, nil) if err != nil { return nil, 0, err } @@ -293,8 +329,12 @@ func (r *clientImpl) GetBlob(name string, dgst digest.Digest, byteOffset int) (i } func (r *clientImpl) InitiateBlobUpload(name string) (string, error) { - postRequest, err := http.NewRequest("POST", - fmt.Sprintf("%s/v2/%s/blobs/uploads/", r.Endpoint, name), nil) + uploadURL, err := r.ub.BuildBlobUploadURL(name) + if err != nil { + return "", err + } + + postRequest, err := http.NewRequest("POST", uploadURL, nil) if err != nil { return "", 
err } @@ -359,7 +399,6 @@ func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, } queryValues := url.Values{} - queryValues.Set("size", fmt.Sprint(length)) queryValues.Set("digest", dgst.String()) putRequest.URL.RawQuery = queryValues.Encode() @@ -394,8 +433,7 @@ func (r *clientImpl) UploadBlob(location string, blob io.ReadCloser, length int, func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, length, startByte int) error { defer blobChunk.Close() - putRequest, err := http.NewRequest("PUT", - fmt.Sprintf("%s%s", r.Endpoint, location), blobChunk) + putRequest, err := http.NewRequest("PUT", location, blobChunk) if err != nil { return err } @@ -443,14 +481,12 @@ func (r *clientImpl) UploadBlobChunk(location string, blobChunk io.ReadCloser, l } func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst digest.Digest) error { - putRequest, err := http.NewRequest("PUT", - fmt.Sprintf("%s%s", r.Endpoint, location), nil) + putRequest, err := http.NewRequest("PUT", location, nil) if err != nil { return err } queryValues := new(url.Values) - queryValues.Set("size", fmt.Sprint(length)) queryValues.Set("digest", dgst.String()) putRequest.URL.RawQuery = queryValues.Encode() @@ -485,8 +521,7 @@ func (r *clientImpl) FinishChunkedBlobUpload(location string, length int, dgst d } func (r *clientImpl) CancelBlobUpload(location string) error { - deleteRequest, err := http.NewRequest("DELETE", - fmt.Sprintf("%s%s", r.Endpoint, location), nil) + deleteRequest, err := http.NewRequest("DELETE", location, nil) if err != nil { return err } @@ -516,12 +551,6 @@ func (r *clientImpl) CancelBlobUpload(location string) error { } } -// imageManifestURL is a helper method for returning the full url to an image -// manifest -func (r *clientImpl) imageManifestURL(name, tag string) string { - return fmt.Sprintf("%s/v2/%s/manifests/%s", r.Endpoint, name, tag) -} - // parseRangeHeader parses out the offset and length from a returned Range // header func parseRangeHeader(byteRangeHeader string) (int, int, error) { diff --git a/client/client_test.go b/client/client_test.go index f3082141..0b4d023b 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -24,11 +24,11 @@ func TestPush(t *testing.T) { tag := "sometag" testBlobs := []testBlob{ { - digest: "12345", + digest: "tarsum.v2+sha256:12345", contents: []byte("some contents"), }, { - digest: "98765", + digest: "tarsum.v2+sha256:98765", contents: []byte("some other contents"), }, } @@ -80,7 +80,6 @@ func TestPush(t *testing.T) { Method: "PUT", Route: uploadLocations[i], QueryParams: map[string][]string{ - "length": {fmt.Sprint(len(blob.contents))}, "digest": {blob.digest.String()}, }, Body: blob.contents, @@ -114,7 +113,10 @@ func TestPush(t *testing.T) { }) server = httptest.NewServer(hack) - client := New(server.URL) + client, err := New(server.URL) + if err != nil { + t.Fatalf("error creating client: %v", err) + } objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), manifestStorage: make(map[string]*storage.SignedManifest), @@ -150,11 +152,11 @@ func TestPull(t *testing.T) { tag := "sometag" testBlobs := []testBlob{ { - digest: "12345", + digest: "tarsum.v2+sha256:12345", contents: []byte("some contents"), }, { - digest: "98765", + digest: "tarsum.v2+sha256:98765", contents: []byte("some other contents"), }, } @@ -205,7 +207,10 @@ func TestPull(t *testing.T) { }, })) server := httptest.NewServer(handler) - client := New(server.URL) + client, err := New(server.URL) + if err != nil { + 
t.Fatalf("error creating client: %v", err) + } objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), manifestStorage: make(map[string]*storage.SignedManifest), @@ -259,11 +264,11 @@ func TestPullResume(t *testing.T) { tag := "sometag" testBlobs := []testBlob{ { - digest: "12345", + digest: "tarsum.v2+sha256:12345", contents: []byte("some contents"), }, { - digest: "98765", + digest: "tarsum.v2+sha256:98765", contents: []byte("some other contents"), }, } @@ -329,7 +334,10 @@ func TestPullResume(t *testing.T) { handler := testutil.NewHandler(layerRequestResponseMappings) server := httptest.NewServer(handler) - client := New(server.URL) + client, err := New(server.URL) + if err != nil { + t.Fatalf("error creating client: %v", err) + } objectStore := &memoryObjectStore{ mutex: new(sync.Mutex), manifestStorage: make(map[string]*storage.SignedManifest), From 6e846d6b29faabb2f6bd0571b6a7a2e7f1dcbb96 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 15 Dec 2014 11:53:52 -0800 Subject: [PATCH 147/165] Use filesystem driver in default configuration --- cmd/registry/config.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/registry/config.yml b/cmd/registry/config.yml index 818bedc2..bf79ca8f 100644 --- a/cmd/registry/config.yml +++ b/cmd/registry/config.yml @@ -1,5 +1,7 @@ version: 0.1 loglevel: debug -storage: inmemory +storage: + filesystem: + rootdirectory: /tmp/registry-dev http: addr: :5000 From c51ea97234f25fd2ac70edd9de30416445f194d9 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 15 Dec 2014 14:17:51 -0800 Subject: [PATCH 148/165] Allows HTTP bind address to be overridden by an environment variable Uses REGISTRY_HTTP_ADDR --- configuration/configuration.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/configuration/configuration.go b/configuration/configuration.go index ab962a90..43405279 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -311,6 +311,10 @@ func parseV0_1Registry(in []byte) (*Configuration, error) { config.Reporting.NewRelic.Name = newRelicName } + if httpAddr, ok := envMap["REGISTRY_HTTP_ADDR"]; ok { + config.HTTP.Addr = httpAddr + } + return (*Configuration)(&config), nil } From 049bf61912166c8f4dbe8fc0b8bc5f89285a49a6 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Mon, 15 Dec 2014 17:41:30 -0800 Subject: [PATCH 149/165] Fix test --- circle.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/circle.yml b/circle.yml index 88125a8d..e205f7b2 100644 --- a/circle.yml +++ b/circle.yml @@ -18,9 +18,9 @@ test: pre: - go version override: - - test -z $(gofmt -s -l . | tee /dev/stderr) + - test -z "$(gofmt -s -l . | tee /dev/stderr)" - go vet ./... - - test -z $(golint ./... | tee /dev/stderr) + - test -z "$(golint ./... | tee /dev/stderr)" - go test -test.v -test.short ./... # Disabling the race detector due to massive memory usage. From 1a6893dbb6a05b80e685ba9e48fc95379ce52647 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Mon, 15 Dec 2014 15:45:02 -0800 Subject: [PATCH 150/165] Adds pre-commit hook, hook config script, and a README The pre-commit hook will automatically gofmt code in place, warning you about any changes. It will also fail to commit if either golint or go vet fails. 
--- project/hooks/README.md | 6 ++++++ project/hooks/configure-hooks.sh | 18 ++++++++++++++++++ project/hooks/pre-commit | 29 +++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 project/hooks/README.md create mode 100755 project/hooks/configure-hooks.sh create mode 100755 project/hooks/pre-commit diff --git a/project/hooks/README.md b/project/hooks/README.md new file mode 100644 index 00000000..eda88696 --- /dev/null +++ b/project/hooks/README.md @@ -0,0 +1,6 @@ +Git Hooks +========= + +To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. + +As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/project/hooks/configure-hooks.sh b/project/hooks/configure-hooks.sh new file mode 100755 index 00000000..6afea8a1 --- /dev/null +++ b/project/hooks/configure-hooks.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +cd $(dirname $0) + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +set -e +set -x + +# Just in case the directory doesn't exist +mkdir -p $REPO_ROOT/.git/hooks + +ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/project/hooks/pre-commit b/project/hooks/pre-commit new file mode 100755 index 00000000..3ee2e913 --- /dev/null +++ b/project/hooks/pre-commit @@ -0,0 +1,29 @@ +#!/bin/sh + +REPO_ROOT=$(git rev-parse --show-toplevel) +RESOLVE_REPO_ROOT_STATUS=$? +if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then + printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr + exit $RESOLVE_REPO_ROOT_STATUS +fi + +cd $REPO_ROOT + +GOFMT_ERRORS=$(gofmt -s -l . 2>&1) +if [ -n "$GOFMT_ERRORS" ]; then + printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr + exit 1 +fi + +GOLINT_ERRORS=$(golint ./... 2>&1) +if [ -n "$GOLINT_ERRORS" ]; then + printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr + exit 1 +fi + +GOVET_ERRORS=$(go vet ./... 2>&1) +GOVET_STATUS=$? +if [ "$GOVET_STATUS" -ne "0" ]; then + printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr + exit $GOVET_STATUS +fi From a7b8e4fda05be0690ac84fda4065e6d5b83deccd Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Tue, 16 Dec 2014 12:01:27 -0800 Subject: [PATCH 151/165] Adds benchmarks for Put/Get, WriteStream/ReadStream, List, and Delete Also fixes an open fd leak in the filesystem driver that these benchmarks revealed. 
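The descriptor leak came from List opening a directory handle that was never closed. The one-line fix in the driver.go hunk below is the usual defer-after-successful-Open pattern; a minimal standalone sketch of that pattern (a hypothetical helper, standard library only):

    package main

    import (
        "fmt"
        "os"
    )

    // listNames returns the entry names under dirPath. Close is deferred
    // immediately after a successful Open, so the descriptor is released
    // on every return path, including the Readdirnames error path that
    // previously leaked.
    func listNames(dirPath string) ([]string, error) {
        dir, err := os.Open(dirPath)
        if err != nil {
            return nil, err
        }
        defer dir.Close()

        return dir.Readdirnames(0)
    }

    func main() {
        names, err := listNames("/tmp")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println(len(names), "entries")
    }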
--- storagedriver/filesystem/driver.go | 2 + storagedriver/testsuites/testsuites.go | 141 ++++++++++++++++++++++++- 2 files changed, 142 insertions(+), 1 deletion(-) diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go index 49a94a50..3c3c950f 100644 --- a/storagedriver/filesystem/driver.go +++ b/storagedriver/filesystem/driver.go @@ -204,6 +204,8 @@ func (d *Driver) List(subPath string) ([]string, error) { return nil, err } + defer dir.Close() + fileNames, err := dir.Readdirnames(0) if err != nil { return nil, err diff --git a/storagedriver/testsuites/testsuites.go b/storagedriver/testsuites/testsuites.go index c83bf605..64aa1e81 100644 --- a/storagedriver/testsuites/testsuites.go +++ b/storagedriver/testsuites/testsuites.go @@ -139,7 +139,7 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) { // storage driver. func (suite *DriverSuite) TestInvalidPaths(c *check.C) { contents := randomContents(64) - invalidFiles := []string{"/", "abc", "/abc./abc", "/.abc", "/a--b", "/a-.b", "/_.abc", "/a/bcd", "/abc_123/d", "/Docker/docker-registry"} + invalidFiles := []string{"", "/", "abc", "123.abc", "/abc./abc", "/.abc", "/a--b", "/a-.b", "/_.abc", "/a/bcd", "/abc_123/d", "/Docker/docker-registry"} for _, filename := range invalidFiles { err := suite.StorageDriver.PutContent(filename, contents) @@ -763,6 +763,142 @@ func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { wg.Wait() } +// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files +func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 0) +} + +// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files +func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024) +} + +// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files +func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024) +} + +// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files +func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { + suite.benchmarkPutGetFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + err := suite.StorageDriver.PutContent(filename, randomContents(size)) + c.Assert(err, check.IsNil) + + _, err = suite.StorageDriver.GetContent(filename) + c.Assert(err, check.IsNil) + } +} + +// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files +func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 0) +} + +// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files +func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024) +} + +// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files +func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024) +} + +// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files +func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { + suite.benchmarkStreamFiles(c, 1024*1024*1024) +} + +func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { + c.SetBytes(size) + parentDir := randomPath(8) + 
defer func() { + c.StopTimer() + suite.StorageDriver.Delete(firstPart(parentDir)) + }() + + for i := 0; i < c.N; i++ { + filename := path.Join(parentDir, randomPath(32)) + written, err := suite.StorageDriver.WriteStream(filename, 0, bytes.NewReader(randomContents(size))) + c.Assert(err, check.IsNil) + c.Assert(written, check.Equals, size) + + rc, err := suite.StorageDriver.ReadStream(filename, 0) + c.Assert(err, check.IsNil) + rc.Close() + } +} + +// BenchmarkList5Files benchmarks List for 5 small files +func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { + suite.benchmarkListFiles(c, 5) +} + +// BenchmarkList50Files benchmarks List for 50 small files +func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { + suite.benchmarkListFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { + parentDir := randomPath(8) + defer func() { + c.StopTimer() + suite.StorageDriver.Delete(firstPart(parentDir)) + }() + + for i := int64(0); i < numFiles; i++ { + err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + + c.ResetTimer() + for i := 0; i < c.N; i++ { + files, err := suite.StorageDriver.List(parentDir) + c.Assert(err, check.IsNil) + c.Assert(int64(len(files)), check.Equals, numFiles) + } +} + +// BenchmarkDelete5Files benchmarks Delete for 5 small files +func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 5) +} + +// BenchmarkDelete50Files benchmarks Delete for 50 small files +func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { + suite.benchmarkDeleteFiles(c, 50) +} + +func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { + for i := 0; i < c.N; i++ { + parentDir := randomPath(8) + defer suite.StorageDriver.Delete(firstPart(parentDir)) + + c.StopTimer() + for j := int64(0); j < numFiles; j++ { + err := suite.StorageDriver.PutContent(path.Join(parentDir, randomPath(32)), nil) + c.Assert(err, check.IsNil) + } + c.StartTimer() + + // This is the operation we're benchmarking + err := suite.StorageDriver.Delete(firstPart(parentDir)) + c.Assert(err, check.IsNil) + } +} + func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tf, err := ioutil.TempFile("", "tf") c.Assert(err, check.IsNil) @@ -867,6 +1003,9 @@ func randomContents(length int64) []byte { } func firstPart(filePath string) string { + if filePath == "" { + return "/" + } for { if filePath[len(filePath)-1] == '/' { filePath = filePath[:len(filePath)-1] From 26d5bece6b95a20fbd9ddfecb9f1ee99036c743f Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Tue, 16 Dec 2014 11:38:08 -0800 Subject: [PATCH 152/165] Multi-go test --- circle.yml | 134 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 93 insertions(+), 41 deletions(-) diff --git a/circle.yml b/circle.yml index e205f7b2..0d75d2ac 100644 --- a/circle.yml +++ b/circle.yml @@ -1,40 +1,112 @@ machine: pre: - - curl -o go.tar.gz -sL https://golang.org/dl/go1.4rc2.linux-amd64.tar.gz - - sudo rm -rf /usr/local/go - - sudo tar -C /usr/local -xzf go.tar.gz - - sudo chmod a+w /usr/local/go/src/ + # Install gvm + - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) + + post: + # Install many go versions + - gvm install go1.3.3 -B --name=old + - gvm install go1.4 -B --name=stable + # - gvm install tip --name=bleed + + environment: + # Convenient shortcuts to "common" locations + CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME + BASE_DIR: 
src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME + # Trick circle brainflat "no absolute path" behavior + BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR + BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR + # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR + # Workaround Circle parsing madness bugs + CIRCLE_PAIN: "mode: set" hosts: + # Not used yet fancy: 127.0.0.1 dependencies: + pre: + # Copy the code to the gopath of all go versions + - > + gvm use old && + mkdir -p "$(dirname $BASE_OLD)" && + cp -R "$CHECKOUT" "$BASE_OLD" + + - > + gvm use stable && + mkdir -p "$(dirname $BASE_STABLE)" && + cp -R "$CHECKOUT" "$BASE_STABLE" + + # - > + # gvm use bleed && + # mkdir -p "$(dirname $BASE_BLEED)" && + # cp -R "$CHECKOUT" "$BASE_BLEED" + + override: + # Install dependencies for every copied clone/go version + - gvm use old && go get -t -d -v ./...: + pwd: $BASE_OLD + + - gvm use stable && go get -t -d -v ./...: + pwd: $BASE_STABLE + + # - gvm use bleed && go get -t -d -v ./...: + # pwd: $BASE_BLEED + post: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get github.com/golang/lint/golint + # For the stable go version, additionally install linting tools + - > + gvm use stable && + go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint test: pre: - - go version + # Output the go versions we are going to test + - gvm use old && go version + - gvm use stable && go version + # - gvm use bleed && go version + + # Preset the goverall report file + - echo "$CIRCLE_PAIN" > ~/goverage.report + + # FMT + - gvm use stable && test -z "$(gofmt -s -l . | tee /dev/stderr)": + pwd: $BASE_STABLE + + # VET + - gvm use stable && go vet ./...: + pwd: $BASE_STABLE + + # LINT + - gvm use stable && test -z "$(golint ./... | tee /dev/stderr)": + pwd: $BASE_STABLE + override: - - test -z "$(gofmt -s -l . | tee /dev/stderr)" - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - go test -test.v -test.short ./... + # Test every version we have (but stable) + - gvm use old; go test -test.v -test.short ./...: + timeout: 600 + pwd: $BASE_OLD - # Disabling the race detector due to massive memory usage. - # - go test -race -test.v ./...: + # - gvm use bleed; go test -test.v -test.short ./...: # timeout: 600 + # pwd: $BASE_BLEED - # TODO(stevvooe): The following is an attempt at using goveralls but it - # just doesn't work. goveralls requires a single profile file to be - # submitted at once, but can't run all of the tests for all the packages - # at once. The command below attempts to fix this but fails because it - # creates a new "job" for run of coveralls, making it so that the coverage - # is partially reported a large number of times. + # Test stable, and report + - gvm use stable; go list ./... | go test -test.short -coverprofile=coverage.out && cat coverage.out | sed -n '1!p' >> ~/goverage.report: + timeout: 600 + pwd: $BASE_STABLE - # - cd $HOME/.go_project/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME && go list ./... | xargs -I{} goveralls -service circleci -repotoken $COVERALLS_TOKEN -race {} + post: + # Report to coveralls + - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN: + pwd: $BASE_STABLE + + ## Notes + # Disabled the -race detector due to massive memory usage. + # Do we want these as well? + # - go get code.google.com/p/go.tools/cmd/goimports + # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" + # http://labix.org/gocheck general: branches: @@ -44,23 +116,3 @@ general: - 0.8 - 0.9 - 1.0 - # This doesn't work, but it would be nice if it did. - # build_dir: ../.go_project/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - -# notify: -# email: -# recipients: -# - distribution@docker.com - -# slack: -# team: docker -# channel: "#dt" -# username: mom -# token: {{SLACK_TOKEN}} -# on_success: false -# on_failure: true - - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck From 56f685c0dd5ccf6d6ea258f5ee6ce0713652f32b Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Tue, 16 Dec 2014 22:58:39 -0800 Subject: [PATCH 153/165] Adds auth package with token auth backend Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- auth/auth.go | 76 +++++++ auth/token/accesscontroller.go | 268 ++++++++++++++++++++++++ auth/token/token.go | 338 ++++++++++++++++++++++++++++++ auth/token/token_test.go | 365 +++++++++++++++++++++++++++++++++ auth/token/util.go | 77 +++++++ 5 files changed, 1124 insertions(+) create mode 100644 auth/auth.go create mode 100644 auth/token/accesscontroller.go create mode 100644 auth/token/token.go create mode 100644 auth/token/token_test.go create mode 100644 auth/token/util.go diff --git a/auth/auth.go b/auth/auth.go new file mode 100644 index 00000000..bd31c56a --- /dev/null +++ b/auth/auth.go @@ -0,0 +1,76 @@ +package auth + +import ( + "fmt" + "net/http" +) + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given recource. +type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error. +type Challenge interface { + error + Status() int + SetHeader(header http.Header) + ServeHTTP(w http.ResponseWriter, r *http.Request) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. +type AccessController interface { + // Authorized returns non-nil if the request is granted the request + // access. If the error is non-nil, access should always be denied. The + // error may be of type Challenge, in which case the caller may have the + // Challenge handle the request or choose what action to take based on the + // Challenge header or response status. + // + // In the future, other error types, besides Challenge, may be added to + // support more complex authorization flows. + Authorized(req *http.Request, access ...Access) error +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the contsructor for different AccesController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. 
+func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/auth/token/accesscontroller.go b/auth/token/accesscontroller.go new file mode 100644 index 00000000..52e06912 --- /dev/null +++ b/auth/token/accesscontroller.go @@ -0,0 +1,268 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" + + "github.com/docker/docker-registry/auth" + "github.com/docker/libtrust" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. +func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set := accessSet[resource] + if set == nil { + set = make(actionSet) + accessSet[resource] = set + } + + set[access.Action] = struct{}{} + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +// Error returns the internal error string for this authChallenge. +func (ac *authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac *authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. 
+// See https://tools.ietf.org/html/rfc6750#section-3
+func (ac *authChallenge) challengeParams() string {
+	str := fmt.Sprintf("Bearer realm=%s,service=%s", strconv.Quote(ac.realm), strconv.Quote(ac.service))
+
+	if scope := ac.accessSet.scopeParam(); scope != "" {
+		str = fmt.Sprintf("%s,scope=%s", str, strconv.Quote(scope))
+	}
+
+	if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken {
+		str = fmt.Sprintf("%s,error=%s", str, strconv.Quote("invalid_token"))
+	} else if ac.err == ErrInsufficientScope {
+		str = fmt.Sprintf("%s,error=%s", str, strconv.Quote("insufficient_scope"))
+	}
+
+	return str
+}
+
+// SetHeader sets the WWW-Authenticate value for the given header.
+func (ac *authChallenge) SetHeader(header http.Header) {
+	header.Add(http.CanonicalHeaderKey("WWW-Authenticate"), ac.challengeParams())
+}
+
+// ServeHTTP handles writing the challenge response
+// by setting the challenge header and status code.
+func (ac *authChallenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ac.SetHeader(w.Header())
+	w.WriteHeader(ac.Status())
+}
+
+// accessController implements the auth.AccessController interface.
+type accessController struct {
+	realm       string
+	issuer      string
+	service     string
+	rootCerts   *x509.CertPool
+	trustedKeys map[string]libtrust.PublicKey
+}
+
+// tokenAccessOptions is a convenience type for handling
+// options to the constructor of an accessController.
+type tokenAccessOptions struct {
+	realm          string
+	issuer         string
+	service        string
+	rootCertBundle string
+}
+
+// checkOptions gathers the necessary options
+// for an accessController from the given map.
+func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) {
+	var opts tokenAccessOptions
+
+	keys := []string{"realm", "issuer", "service", "rootCertBundle"}
+	vals := make([]string, 0, len(keys))
+	for _, key := range keys {
+		val, ok := options[key].(string)
+		if !ok {
+			return opts, fmt.Errorf("token auth requires a valid option string: %q", key)
+		}
+		vals = append(vals, val)
+	}
+
+	opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3]
+
+	return opts, nil
+}
+
+// newAccessController creates an accessController using the given options.
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. +func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Access) error { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: newStringSet(ac.issuer), + AccpetedAudiences: newStringSet(ac.service), + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return challenge + } + } + + return nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/auth/token/token.go b/auth/token/token.go new file mode 100644 index 00000000..d1baafe6 --- /dev/null +++ b/auth/token/token.go @@ -0,0 +1,338 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/docker-registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." +) + +// Errors used by token parsing and verification. 
+var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. +type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + RawJWK json.RawMessage `json:"jwk"` + SigningKey libtrust.PublicKey `json:"-"` +} + +// CheckSigningKey parses the `jwk` field of a JOSE header and sets the +// SigningKey field if it is valid. +func (h *Header) CheckSigningKey() (err error) { + if len(h.RawJWK) == 0 { + // No signing key was specified. + return + } + + h.SigningKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(h.RawJWK)) + h.RawJWK = nil // Don't need this anymore! + + return +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte + Valid bool +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers stringSet + AccpetedAudiences stringSet + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. +func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Errorf("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = token.Header.CheckSigningKey(); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + if t.Valid { + // Token was already verified. + return nil + } + + // Verify that the Issuer claim is a trusted authority. 
+	if !verifyOpts.TrustedIssuers.contains(t.Claims.Issuer) {
+		log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer)
+		return ErrInvalidToken
+	}
+
+	// Verify that the Audience claim is allowed.
+	if !verifyOpts.AccpetedAudiences.contains(t.Claims.Audience) {
+		log.Errorf("token intended for another audience: %q", t.Claims.Audience)
+		return ErrInvalidToken
+	}
+
+	// Verify that the token is currently usable and not expired.
+	currentUnixTime := time.Now().Unix()
+	if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) {
+		log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime)
+		return ErrInvalidToken
+	}
+
+	// Verify the token signature.
+	if len(t.Signature) == 0 {
+		log.Error("token has no signature")
+		return ErrInvalidToken
+	}
+
+	// If the token header has a SigningKey field, verify the signature
+	// using that key and its included x509 certificate chain if necessary.
+	// If the Header's SigningKey field is nil, try using the KeyID field.
+	signingKey := t.Header.SigningKey
+
+	if signingKey == nil {
+		// Find the key in the given collection of trusted keys.
+		trustedKey, ok := verifyOpts.TrustedKeys[t.Header.KeyID]
+		if !ok {
+			log.Errorf("token signed by untrusted key with ID: %q", t.Header.KeyID)
+			return ErrInvalidToken
+		}
+		signingKey = trustedKey
+	}
+
+	// First verify the signature of the token using the key which signed it.
+	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
+		log.Errorf("unable to verify token signature: %s", err)
+		return ErrInvalidToken
+	}
+
+	// Next, check if the signing key is one of the trusted keys.
+	if _, isTrustedKey := verifyOpts.TrustedKeys[signingKey.KeyID()]; isTrustedKey {
+		// We're done! The token was signed by a trusted key and has been verified!
+		t.Valid = true
+		return nil
+	}
+
+	// Otherwise, we need to check the signing key's included certificate chain.
+	return t.verifyCertificateChain(signingKey, verifyOpts.Roots)
+}
+
+// verifyCertificateChain attempts to verify the token using the "x5c" field
+// of the given leafKey which was used to sign it. Returns a nil error if
+// the key's certificate chain is valid and rooted at one of the given roots.
+func (t *Token) verifyCertificateChain(leafKey libtrust.PublicKey, roots *x509.CertPool) error {
+	// In this case, the token signature is valid, but the key that signed it
+	// is not in our set of trusted keys. So, we'll need to check if the
+	// token's signing key included an x509 certificate chain that can be
+	// verified up to one of our trusted roots.
+	x5cVal, ok := leafKey.GetExtendedField("x5c").([]interface{})
+	if !ok || x5cVal == nil {
+		log.Error("unable to verify token signature: signed by untrusted key with no valid certificate chain")
+		return ErrInvalidToken
+	}
+
+	// Ensure each item is of the correct type.
+	x5c := make([]string, len(x5cVal))
+	for i, val := range x5cVal {
+		certString, ok := val.(string)
+		if !ok || len(certString) == 0 {
+			log.Error("unable to verify token signature: signed by untrusted key with malformed certificate chain")
+			return ErrInvalidToken
+		}
+		x5c[i] = certString
+	}
+
+	// Ensure the first element is encoded correctly.
+	leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0])
+	if err != nil {
+		log.Errorf("unable to decode signing key leaf cert: %s", err)
+		return ErrInvalidToken
+	}
+
+	// And that it is a valid x509 certificate.
+ leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + log.Errorf("unable to parse signing key leaf cert: %s", err) + return ErrInvalidToken + } + + // Verify that the public key in the leaf cert *is* the signing key. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + log.Error("unable to get signing key leaf cert public key value") + return ErrInvalidToken + } + + leafPubKey, err := libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + log.Errorf("unable to make libtrust public key from signing key leaf cert: %s", err) + return ErrInvalidToken + } + + if leafPubKey.KeyID() != leafKey.KeyID() { + log.Error("token signing key ID and leaf certificate public key ID do not match") + return ErrInvalidToken + } + + // The rest of the x5c array are intermediate certificates. + intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + log.Errorf("unable to decode signing key intermediate cert: %s", err) + return ErrInvalidToken + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + log.Errorf("unable to parse signing key intermediate cert: %s", err) + return ErrInvalidToken + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. + if _, err = leafCert.Verify(verifyOpts); err != nil { + log.Errorf("unable to verify signing key certificate: %s", err) + return ErrInvalidToken + } + + // The signing key's x509 chain is valid! + t.Valid = true + return nil +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. +func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set := accessSet[resource] + if set == nil { + set = make(actionSet) + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set[action] = struct{}{} + } + } + + return accessSet +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/auth/token/token_test.go b/auth/token/token_test.go new file mode 100644 index 00000000..be7470c9 --- /dev/null +++ b/auth/token/token_test.go @@ -0,0 +1,365 @@ +package token + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker-registry/auth" + "github.com/docker/libtrust" +) + +func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { + keys := make([]libtrust.PrivateKey, 0, numKeys) + + for i := 0; i < numKeys; i++ { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + keys = append(keys, key) + } + + return keys, nil +} + +func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { + if depth == 0 { + // Don't need to build a chain. 
+		return rootKey, nil
+	}
+
+	var (
+		x5c       = make([]string, depth)
+		parentKey = rootKey
+		key       libtrust.PrivateKey
+		cert      *x509.Certificate
+		err       error
+	)
+
+	for depth > 0 {
+		if key, err = libtrust.GenerateECP256PrivateKey(); err != nil {
+			return nil, err
+		}
+
+		if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil {
+			return nil, err
+		}
+
+		depth--
+		x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw)
+		parentKey = key
+	}
+
+	key.AddExtendedField("x5c", x5c)
+
+	return key, nil
+}
+
+func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) {
+	certs := make([]*x509.Certificate, 0, len(rootKeys))
+
+	for _, key := range rootKeys {
+		cert, err := libtrust.GenerateCACert(key, key)
+		if err != nil {
+			return nil, err
+		}
+		certs = append(certs, cert)
+	}
+
+	return certs, nil
+}
+
+func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey {
+	trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys))
+
+	for _, key := range rootKeys {
+		trustedKeys[key.KeyID()] = key.PublicKey()
+	}
+
+	return trustedKeys
+}
+
+func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) {
+	signingKey, err := makeSigningKeyWithChain(rootKey, depth)
+	if err != nil {
+		return nil, fmt.Errorf("unable to make signing key with chain: %s", err)
+	}
+
+	rawJWK, err := signingKey.PublicKey().MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err)
+	}
+
+	joseHeader := &Header{
+		Type:       "JWT",
+		SigningAlg: "ES256",
+		RawJWK:     json.RawMessage(rawJWK),
+	}
+
+	now := time.Now()
+
+	randomBytes := make([]byte, 15)
+	if _, err = rand.Read(randomBytes); err != nil {
+		return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err)
+	}
+
+	claimSet := &ClaimSet{
+		Issuer:     issuer,
+		Subject:    "foo",
+		Audience:   audience,
+		Expiration: now.Add(5 * time.Minute).Unix(),
+		NotBefore:  now.Unix(),
+		IssuedAt:   now.Unix(),
+		JWTID:      base64.URLEncoding.EncodeToString(randomBytes),
+		Access:     access,
+	}
+
+	var joseHeaderBytes, claimSetBytes []byte
+
+	if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil {
+		return nil, fmt.Errorf("unable to marshal jose header: %s", err)
+	}
+	if claimSetBytes, err = json.Marshal(claimSet); err != nil {
+		return nil, fmt.Errorf("unable to marshal claim set: %s", err)
+	}
+
+	encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes)
+	encodedClaimSet := joseBase64UrlEncode(claimSetBytes)
+	encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet)
+
+	var signatureBytes []byte
+	if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil {
+		return nil, fmt.Errorf("unable to sign jwt payload: %s", err)
+	}
+
+	signature := joseBase64UrlEncode(signatureBytes)
+	tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature)
+
+	return NewToken(tokenString)
+}
+
+// This test makes 4 tokens with a varying number of intermediate
+// certificates ranging from no intermediate chain to a length of 3
+// intermediates.
+func TestTokenVerify(t *testing.T) {
+	var (
+		numTokens = 4
+		issuer    = "test-issuer"
+		audience  = "test-audience"
+		access    = []*ResourceActions{
+			{
+				Type:    "repository",
+				Name:    "foo/bar",
+				Actions: []string{"pull", "push"},
+			},
+		}
+	)
+
+	rootKeys, err := makeRootKeys(numTokens)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rootCerts, err := makeRootCerts(rootKeys)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rootPool := x509.NewCertPool()
+	for _, rootCert := range rootCerts {
+		rootPool.AddCert(rootCert)
+	}
+
+	trustedKeys := makeTrustedKeyMap(rootKeys)
+
+	tokens := make([]*Token, 0, numTokens)
+
+	for i := 0; i < numTokens; i++ {
+		token, err := makeTestToken(issuer, audience, access, rootKeys[i], i)
+		if err != nil {
+			t.Fatal(err)
+		}
+		tokens = append(tokens, token)
+	}
+
+	verifyOps := VerifyOptions{
+		TrustedIssuers:    newStringSet(issuer),
+		AccpetedAudiences: newStringSet(audience),
+		Roots:             rootPool,
+		TrustedKeys:       trustedKeys,
+	}
+
+	for _, token := range tokens {
+		if err := token.Verify(verifyOps); err != nil {
+			t.Fatal(err)
+		}
+		if !token.Valid {
+			t.Fatal("token not marked as Valid")
+		}
+	}
+}
+
+func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) {
+	rootCerts, err := makeRootCerts(rootKeys)
+	if err != nil {
+		return "", err
+	}
+
+	tempFile, err := ioutil.TempFile("", "rootCertBundle")
+	if err != nil {
+		return "", err
+	}
+	defer tempFile.Close()
+
+	for _, cert := range rootCerts {
+		if err = pem.Encode(tempFile, &pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: cert.Raw,
+		}); err != nil {
+			os.Remove(tempFile.Name())
+			return "", err
+		}
+	}
+
+	return tempFile.Name(), nil
+}
+
+// TestAccessController tests complete integration of the token auth package.
+// It starts by mocking the options for a token auth accessController which
+// it creates. It then tries a few mock requests:
+// - don't supply a token; should error with challenge
+// - supply an invalid token; should error with challenge
+// - supply a token with insufficient access; should error with challenge
+// - supply a valid token; should not error
+func TestAccessController(t *testing.T) {
+	// Make 2 keys; only the first is to be a trusted root key.
+	rootKeys, err := makeRootKeys(2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1])
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(rootCertBundleFilename)
+
+	realm := "https://auth.example.com/token/"
+	issuer := "test-issuer.example.com"
+	service := "test-service.example.com"
+
+	options := map[string]interface{}{
+		"realm":          realm,
+		"issuer":         issuer,
+		"service":        service,
+		"rootCertBundle": rootCertBundleFilename,
+	}
+
+	accessController, err := newAccessController(options)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 1. Make a mock http.Request with no token.
+	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testAccess := auth.Access{
+		Resource: auth.Resource{
+			Type: "foo",
+			Name: "bar",
+		},
+		Action: "baz",
+	}
+
+	err = accessController.Authorized(req, testAccess)
+	challenge, ok := err.(auth.Challenge)
+	if !ok {
+		t.Fatal("accessController did not return a challenge")
+	}
+
+	if challenge.Error() != ErrTokenRequired.Error() {
+		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrTokenRequired)
+	}
+
+	// 2. Supply an invalid token.
+	token, err := makeTestToken(
+		issuer, service,
+		[]*ResourceActions{{
+			Type:    testAccess.Type,
+			Name:    testAccess.Name,
+			Actions: []string{testAccess.Action},
+		}},
+		rootKeys[1], 1, // Everything is valid except the key which signed it.
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
+
+	err = accessController.Authorized(req, testAccess)
+	challenge, ok = err.(auth.Challenge)
+	if !ok {
+		t.Fatal("accessController did not return a challenge")
+	}
+
+	if challenge.Error() != ErrInvalidToken.Error() {
+		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInvalidToken)
+	}
+
+	// 3. Supply a token with insufficient access.
+	token, err = makeTestToken(
+		issuer, service,
+		[]*ResourceActions{}, // No access specified.
+		rootKeys[0], 1,
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
+
+	err = accessController.Authorized(req, testAccess)
+	challenge, ok = err.(auth.Challenge)
+	if !ok {
+		t.Fatal("accessController did not return a challenge")
+	}
+
+	if challenge.Error() != ErrInsufficientScope.Error() {
+		t.Fatalf("accessController did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope)
+	}
+
+	// 4. Supply the token we need, or deserve, or whatever.
+	token, err = makeTestToken(
+		issuer, service,
+		[]*ResourceActions{{
+			Type:    testAccess.Type,
+			Name:    testAccess.Name,
+			Actions: []string{testAccess.Action},
+		}},
+		rootKeys[0], 1, // Everything is valid this time.
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw()))
+
+	if err = accessController.Authorized(req, testAccess); err != nil {
+		t.Fatalf("accessController returned unexpected error: %s", err)
+	}
+}
diff --git a/auth/token/util.go b/auth/token/util.go
new file mode 100644
index 00000000..94078fb8
--- /dev/null
+++ b/auth/token/util.go
@@ -0,0 +1,77 @@
+package token
+
+import (
+	"encoding/base64"
+	"errors"
+	"strings"
+)
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+	switch len(s) % 4 {
+	case 0:
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	default:
+		return nil, errors.New("illegal base64url string")
+	}
+	return base64.URLEncoding.DecodeString(s)
+}
+
+// stringSet is a useful type for looking up strings.
+type stringSet map[string]struct{}
+
+func newStringSet(strs ...string) stringSet {
+	set := make(stringSet, len(strs))
+	for _, str := range strs {
+		set[str] = struct{}{}
+	}
+
+	return set
+}
+
+// contains returns whether the given key is in this StringSet.
+func (ss stringSet) contains(key string) bool {
+	_, ok := ss[key]
+	return ok
+}
+
+// keys returns a slice of all keys in this stringSet.
+func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} + +// actionSet is a special type of stringSet. +type actionSet stringSet + +// contains calls stringSet.contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + ss := stringSet(s) + + return ss.contains("*") || ss.contains(action) +} + +// keys wraps stringSet.keys() +func (s actionSet) keys() []string { + return stringSet(s).keys() +} From 88de2e11fb5f79135ec64761a3c26af4e607488f Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 17 Dec 2014 10:57:05 -0800 Subject: [PATCH 154/165] Refactor auth stringSet into common.StringSet Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- auth/token/accesscontroller.go | 20 +++++++------ auth/token/token.go | 17 +++++------ auth/token/token_test.go | 8 ++++-- auth/token/util.go | 52 ++++++++-------------------------- common/stringset.go | 35 +++++++++++++++++++++++ 5 files changed, 72 insertions(+), 60 deletions(-) create mode 100644 common/stringset.go diff --git a/auth/token/accesscontroller.go b/auth/token/accesscontroller.go index 52e06912..66bf1c31 100644 --- a/auth/token/accesscontroller.go +++ b/auth/token/accesscontroller.go @@ -12,8 +12,10 @@ import ( "strconv" "strings" - "github.com/docker/docker-registry/auth" "github.com/docker/libtrust" + + "github.com/docker/docker-registry/auth" + "github.com/docker/docker-registry/common" ) // accessSet maps a typed, named resource to @@ -31,13 +33,13 @@ func newAccessSet(accessItems ...auth.Access) accessSet { Name: access.Name, } - set := accessSet[resource] - if set == nil { - set = make(actionSet) + set, exists := accessSet[resource] + if !exists { + set = newActionSet() accessSet[resource] = set } - set[access.Action] = struct{}{} + set.Add(access.Action) } return accessSet @@ -47,7 +49,7 @@ func newAccessSet(accessItems ...auth.Access) accessSet { func (s accessSet) contains(access auth.Access) bool { actionSet, ok := s[access.Resource] if ok { - return actionSet.contains(access.Action) + return actionSet.Contains(access.Action) } return false @@ -60,7 +62,7 @@ func (s accessSet) scopeParam() string { scopes := make([]string, 0, len(s)) for resource, actionSet := range s { - actions := strings.Join(actionSet.keys(), ",") + actions := strings.Join(actionSet.Keys(), ",") scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) } @@ -240,8 +242,8 @@ func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Ac } verifyOpts := VerifyOptions{ - TrustedIssuers: newStringSet(ac.issuer), - AccpetedAudiences: newStringSet(ac.service), + TrustedIssuers: common.NewStringSet(ac.issuer), + AccpetedAudiences: common.NewStringSet(ac.service), Roots: ac.rootCerts, TrustedKeys: ac.trustedKeys, } diff --git a/auth/token/token.go b/auth/token/token.go index d1baafe6..d1b0d67a 100644 --- a/auth/token/token.go +++ b/auth/token/token.go @@ -14,6 +14,7 @@ import ( "github.com/docker/libtrust" "github.com/docker/docker-registry/auth" + "github.com/docker/docker-registry/common" ) const ( @@ -85,8 +86,8 @@ type Token struct { // VerifyOptions is used to specify // options when verifying a JSON Web Token. 
type VerifyOptions struct { - TrustedIssuers stringSet - AccpetedAudiences stringSet + TrustedIssuers common.StringSet + AccpetedAudiences common.StringSet Roots *x509.CertPool TrustedKeys map[string]libtrust.PublicKey } @@ -155,13 +156,13 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error { } // Verify that the Issuer claim is a trusted authority. - if !verifyOpts.TrustedIssuers.contains(t.Claims.Issuer) { + if !verifyOpts.TrustedIssuers.Contains(t.Claims.Issuer) { log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) return ErrInvalidToken } // Verify that the Audience claim is allowed. - if !verifyOpts.AccpetedAudiences.contains(t.Claims.Audience) { + if !verifyOpts.AccpetedAudiences.Contains(t.Claims.Audience) { log.Errorf("token intended for another audience: %q", t.Claims.Audience) return ErrInvalidToken } @@ -319,14 +320,14 @@ func (t *Token) accessSet() accessSet { Name: resourceActions.Name, } - set := accessSet[resource] - if set == nil { - set = make(actionSet) + set, exists := accessSet[resource] + if !exists { + set = newActionSet() accessSet[resource] = set } for _, action := range resourceActions.Actions { - set[action] = struct{}{} + set.Add(action) } } diff --git a/auth/token/token_test.go b/auth/token/token_test.go index be7470c9..da466dde 100644 --- a/auth/token/token_test.go +++ b/auth/token/token_test.go @@ -15,8 +15,10 @@ import ( "testing" "time" - "github.com/docker/docker-registry/auth" "github.com/docker/libtrust" + + "github.com/docker/docker-registry/auth" + "github.com/docker/docker-registry/common" ) func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { @@ -194,8 +196,8 @@ func TestTokenVerify(t *testing.T) { } verifyOps := VerifyOptions{ - TrustedIssuers: newStringSet(issuer), - AccpetedAudiences: newStringSet(audience), + TrustedIssuers: common.NewStringSet(issuer), + AccpetedAudiences: common.NewStringSet(audience), Roots: rootPool, TrustedKeys: trustedKeys, } diff --git a/auth/token/util.go b/auth/token/util.go index 94078fb8..7ec52cef 100644 --- a/auth/token/util.go +++ b/auth/token/util.go @@ -4,6 +4,8 @@ import ( "encoding/base64" "errors" "strings" + + "github.com/docker/docker-registry/common" ) // joseBase64UrlEncode encodes the given data using the standard base64 url @@ -31,47 +33,17 @@ func joseBase64UrlDecode(s string) ([]byte, error) { return base64.URLEncoding.DecodeString(s) } -// stringSet is a useful type for looking up strings. -type stringSet map[string]struct{} - -func newStringSet(strs ...string) stringSet { - set := make(stringSet, len(strs)) - for _, str := range strs { - set[str] = struct{}{} - } - - return set -} - -// contains returns whether the given key is in this StringSet. -func (ss stringSet) contains(key string) bool { - _, ok := ss[key] - return ok -} - -// keys returns a slice of all keys in this stringSet. -func (ss stringSet) keys() []string { - keys := make([]string, 0, len(ss)) - - for key := range ss { - keys = append(keys, key) - } - - return keys -} - // actionSet is a special type of stringSet. -type actionSet stringSet +type actionSet struct { + common.StringSet +} -// contains calls stringSet.contains() for +func newActionSet(actions ...string) actionSet { + return actionSet{common.NewStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for // either "*" or the given action string. 
-func (s actionSet) contains(action string) bool {
-	ss := stringSet(s)
-
-	return ss.contains("*") || ss.contains(action)
-}
-
-// keys wraps stringSet.keys()
-func (s actionSet) keys() []string {
-	return stringSet(s).keys()
+func (s actionSet) Contains(action string) bool {
+	return s.StringSet.Contains("*") || s.StringSet.Contains(action)
 }
diff --git a/common/stringset.go b/common/stringset.go
new file mode 100644
index 00000000..36f4ba5a
--- /dev/null
+++ b/common/stringset.go
@@ -0,0 +1,35 @@
+package common
+
+// StringSet is a useful type for looking up strings.
+type StringSet map[string]struct{}
+
+// NewStringSet creates a new StringSet with the given strings.
+func NewStringSet(keys ...string) StringSet {
+	ss := make(StringSet, len(keys))
+	ss.Add(keys...)
+	return ss
+}
+
+// Add inserts the given keys into this StringSet.
+func (ss StringSet) Add(keys ...string) {
+	for _, key := range keys {
+		ss[key] = struct{}{}
+	}
+}
+
+// Contains returns whether the given key is in this StringSet.
+func (ss StringSet) Contains(key string) bool {
+	_, ok := ss[key]
+	return ok
+}
+
+// Keys returns a slice of all keys in this StringSet.
+func (ss StringSet) Keys() []string {
+	keys := make([]string, 0, len(ss))
+
+	for key := range ss {
+		keys = append(keys, key)
+	}
+
+	return keys
+}

From d30a8321d8d2561dda41769a5e612462338d60c1 Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Wed, 17 Dec 2014 11:35:35 -0800
Subject: [PATCH 155/165] Address auth package comments from stevvooe

Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn)

---
 auth/auth.go                   | 51 +++++++++++++++++++++++++++-------
 auth/token/accesscontroller.go |  2 +-
 auth/token/token.go            | 11 ++------
 auth/token/token_test.go       |  3 --
 4 files changed, 44 insertions(+), 23 deletions(-)

diff --git a/auth/auth.go b/auth/auth.go
index bd31c56a..eb7332e7 100644
--- a/auth/auth.go
+++ b/auth/auth.go
@@ -1,3 +1,33 @@
+// Package auth defines a standard interface for request access controllers.
+//
+// An access controller has a simple interface with a single `Authorized`
+// method which checks that a given request is authorized to perform one or
+// more actions on one or more resources. This method should return a non-nil
+// error if the request is not authorized.
+//
+// An implementation registers its access controller by name with a constructor
+// which accepts an options map for configuring the access controller.
+//
+//	options := map[string]interface{}{"sillySecret": "whysosilly?"}
+//	accessController, _ := auth.GetAccessController("silly", options)
+//
+// This `accessController` can then be used in a request handler like so:
+//
+//	func updateOrder(w http.ResponseWriter, r *http.Request) {
+//		orderNumber := r.FormValue("orderNumber")
+//		resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
+//		access := auth.Access{Resource: resource, Action: "update"}
+//
+//		if err := accessController.Authorized(r, access); err != nil {
+//			if challenge, ok := err.(auth.Challenge); ok {
+//				// Let the challenge write the response.
+//				challenge.ServeHTTP(w, r)
+//			} else {
+//				// Some other error.
+//			}
+//		}
+//	}
+//
 package auth
 
 import (
@@ -23,8 +53,11 @@ type Access struct {
 // header values based on the error.
 type Challenge interface {
 	error
-	Status() int
-	SetHeader(header http.Header)
+	// ServeHTTP prepares the request to conduct the appropriate challenge
+	// response. For most implementations, simply calling ServeHTTP should be
+	// sufficient.
Because no body is written, users may write a custom body after + // calling ServeHTTP, but any headers must be written before the call and may + // be overwritten. ServeHTTP(w http.ResponseWriter, r *http.Request) } @@ -32,14 +65,12 @@ type Challenge interface { // and required access levels for a request. Implementations can support both // complete denial and http authorization challenges. type AccessController interface { - // Authorized returns non-nil if the request is granted the request - // access. If the error is non-nil, access should always be denied. The - // error may be of type Challenge, in which case the caller may have the - // Challenge handle the request or choose what action to take based on the - // Challenge header or response status. - // - // In the future, other error types, besides Challenge, may be added to - // support more complex authorization flows. + // Authorized returns non-nil if the request is granted access. If one or + // more Access structs are provided, the requested access will be compared + // with what is available to the request. If the error is non-nil, access + // should always be denied. The error may be of type Challenge, in which + // case the caller may have the Challenge handle the request or choose + // what action to take based on the Challenge header or response status. Authorized(req *http.Request, access ...Access) error } diff --git a/auth/token/accesscontroller.go b/auth/token/accesscontroller.go index 66bf1c31..09d78a71 100644 --- a/auth/token/accesscontroller.go +++ b/auth/token/accesscontroller.go @@ -114,7 +114,7 @@ func (ac *authChallenge) challengeParams() string { // SetHeader sets the WWW-Authenticate value for the given header. func (ac *authChallenge) SetHeader(header http.Header) { - header.Add(http.CanonicalHeaderKey("WWW-Authenticate"), ac.challengeParams()) + header.Add("WWW-Authenticate", ac.challengeParams()) } // ServeHttp handles writing the challenge response diff --git a/auth/token/token.go b/auth/token/token.go index d1b0d67a..2c1114a6 100644 --- a/auth/token/token.go +++ b/auth/token/token.go @@ -80,7 +80,6 @@ type Token struct { Header *Header Claims *ClaimSet Signature []byte - Valid bool } // VerifyOptions is used to specify @@ -150,11 +149,6 @@ func NewToken(rawToken string) (*Token, error) { // Verify attempts to verify this token using the given options. // Returns a nil error if the token is valid. func (t *Token) Verify(verifyOpts VerifyOptions) error { - if t.Valid { - // Token was already verified. - return nil - } - // Verify that the Issuer claim is a trusted authority. if !verifyOpts.TrustedIssuers.Contains(t.Claims.Issuer) { log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) @@ -203,8 +197,8 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error { // Next, check if the signing key is one of the trusted keys. if _, isTrustedKey := verifyOpts.TrustedKeys[signingKey.KeyID()]; isTrustedKey { - // We're done! The token was signed by a trusted key and has been verified! - t.Valid = true + // We're done! The token was signed by + // a trusted key and has been verified! return nil } @@ -301,7 +295,6 @@ func (t *Token) verifyCertificateChain(leafKey libtrust.PublicKey, roots *x509.C } // The signing key's x509 chain is valid! 
- t.Valid = true return nil } diff --git a/auth/token/token_test.go b/auth/token/token_test.go index da466dde..c1e0d2ad 100644 --- a/auth/token/token_test.go +++ b/auth/token/token_test.go @@ -206,9 +206,6 @@ func TestTokenVerify(t *testing.T) { if err := token.Verify(verifyOps); err != nil { t.Fatal(err) } - if !token.Valid { - t.Fatal("token not marked as Valid") - } } } From 454d4e918a824ad8821dcc4358f58cfdfe07a706 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 17 Dec 2014 12:04:18 -0800 Subject: [PATCH 156/165] Fixes Dockerfile "go get" command Now pulls down all dependencies for registry sub-packages --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6297b38d..8ab4e7f8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM golang COPY . /go/src/github.com/docker/docker-registry # Fetch any dependencies to run the registry -RUN go get github.com/docker/docker-registry +RUN go get github.com/docker/docker-registry/... RUN go install github.com/docker/docker-registry/cmd/registry ENV CONFIG_PATH /etc/docker/registry/config.yml @@ -11,4 +11,4 @@ COPY ./cmd/registry/config.yml $CONFIG_PATH EXPOSE 5000 ENV PATH /go/bin -CMD registry $CONFIG_PATH \ No newline at end of file +CMD registry $CONFIG_PATH From b54bf450dcd4a8a1602ecc2225fcd75577ad16df Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 17 Dec 2014 12:16:32 -0800 Subject: [PATCH 157/165] Fixes typo on auth/token VerifyOptions field Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- auth/token/accesscontroller.go | 2 +- auth/token/token.go | 4 ++-- auth/token/token_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/auth/token/accesscontroller.go b/auth/token/accesscontroller.go index 09d78a71..f7ca4d52 100644 --- a/auth/token/accesscontroller.go +++ b/auth/token/accesscontroller.go @@ -243,7 +243,7 @@ func (ac *accessController) Authorized(req *http.Request, accessItems ...auth.Ac verifyOpts := VerifyOptions{ TrustedIssuers: common.NewStringSet(ac.issuer), - AccpetedAudiences: common.NewStringSet(ac.service), + AcceptedAudiences: common.NewStringSet(ac.service), Roots: ac.rootCerts, TrustedKeys: ac.trustedKeys, } diff --git a/auth/token/token.go b/auth/token/token.go index 2c1114a6..568b257a 100644 --- a/auth/token/token.go +++ b/auth/token/token.go @@ -86,7 +86,7 @@ type Token struct { // options when verifying a JSON Web Token. type VerifyOptions struct { TrustedIssuers common.StringSet - AccpetedAudiences common.StringSet + AcceptedAudiences common.StringSet Roots *x509.CertPool TrustedKeys map[string]libtrust.PublicKey } @@ -156,7 +156,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error { } // Verify that the Audience claim is allowed. 
-	if !verifyOpts.AccpetedAudiences.Contains(t.Claims.Audience) {
+	if !verifyOpts.AcceptedAudiences.Contains(t.Claims.Audience) {
 		log.Errorf("token intended for another audience: %q", t.Claims.Audience)
 		return ErrInvalidToken
 	}
diff --git a/auth/token/token_test.go b/auth/token/token_test.go
index c1e0d2ad..13d7cede 100644
--- a/auth/token/token_test.go
+++ b/auth/token/token_test.go
@@ -197,7 +197,7 @@
 	verifyOps := VerifyOptions{
 		TrustedIssuers:    common.NewStringSet(issuer),
-		AccpetedAudiences: common.NewStringSet(audience),
+		AcceptedAudiences: common.NewStringSet(audience),
 		Roots:             rootPool,
 		TrustedKeys:       trustedKeys,
 	}

From f9b119974d4f38834b9fe7521ee40a5f47fbb045 Mon Sep 17 00:00:00 2001
From: Brian Bland
Date: Wed, 17 Dec 2014 14:18:58 -0800
Subject: [PATCH 158/165] Genericizes the yaml+environment versioned
 configuration parser

Registry configuration parsing uses the new parser with a single version
declaration and an environment prefix of "REGISTRY"

---
 configuration/README.md             |   2 +-
 configuration/configuration.go      | 161 +++------------------
 configuration/configuration_test.go |  57 ++++++--
 configuration/parser.go             | 203 ++++++++++++++++++++++++++++
 4 files changed, 274 insertions(+), 149 deletions(-)
 create mode 100644 configuration/parser.go

diff --git a/configuration/README.md b/configuration/README.md
index 03ac8ab3..1219051e 100644
--- a/configuration/README.md
+++ b/configuration/README.md
@@ -68,6 +68,6 @@ Any configuration field other than version can be replaced by providing an envir
 For example, to change the loglevel to `error`, one can provide `REGISTRY_LOGLEVEL=error`, and to change the s3 storage driver's region parameter to `us-west-1`, one can provide `REGISTRY_STORAGE_S3_REGION=us-west-1`.
 
 ### Notes
-If an environment variable changes a map value into a string, such as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then all sub-fields will be erased. As such, changing the storage type will remove all parameters related to the old storage type.
+If an environment variable changes a map value into a string, such as replacing the storage driver type with `REGISTRY_STORAGE=filesystem`, then all sub-fields will be erased. As such, specifying the storage type in the environment will remove all parameters related to the old storage configuration.
 
 By restricting all keys in the configuration file to lowercase letters and numbers, we can avoid any potential environment variable mapping ambiguity.
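The override rule described in this README can be made concrete with a short sketch. The overrideFromEnv helper and the flat params map below are assumptions for illustration only; the actual parser added in configuration/parser.go (next diff) walks typed structs rather than a map.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// overrideFromEnv is a hypothetical helper illustrating the README's rule:
// an environment variable REGISTRY_STORAGE_<TYPE>_<PARAM> replaces the named
// parameter of the configured storage driver.
func overrideFromEnv(storageType string, params map[string]string) {
	prefix := "REGISTRY_STORAGE_" + strings.ToUpper(storageType) + "_"
	for _, env := range os.Environ() {
		kv := strings.SplitN(env, "=", 2)
		if strings.HasPrefix(kv[0], prefix) {
			// Config keys are restricted to lowercase, so lower-case the suffix.
			params[strings.ToLower(strings.TrimPrefix(kv[0], prefix))] = kv[1]
		}
	}
}

func main() {
	os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1")

	params := map[string]string{"region": "us-east-1", "bucket": "registry"}
	overrideFromEnv("s3", params)

	fmt.Println(params["region"]) // prints "us-west-1"
}
```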
diff --git a/configuration/configuration.go b/configuration/configuration.go index 43405279..bbb88a0e 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -4,12 +4,8 @@ import ( "fmt" "io" "io/ioutil" - "os" - "regexp" - "strconv" + "reflect" "strings" - - "gopkg.in/BrianBland/yaml.v2" ) // Configuration is a versioned registry configuration, intended to be provided by a yaml file, and @@ -39,40 +35,6 @@ type Configuration struct { // This is currently aliased to Configuration, as it is the current version type v0_1Configuration Configuration -// Version is a major/minor version pair of the form Major.Minor -// Major version upgrades indicate structure or type changes -// Minor version upgrades should be strictly additive -type Version string - -// MajorMinorVersion constructs a Version from its Major and Minor components -func MajorMinorVersion(major, minor uint) Version { - return Version(fmt.Sprintf("%d.%d", major, minor)) -} - -func (version Version) major() (uint, error) { - majorPart := strings.Split(string(version), ".")[0] - major, err := strconv.ParseUint(majorPart, 10, 0) - return uint(major), err -} - -// Major returns the major version portion of a Version -func (version Version) Major() uint { - major, _ := version.major() - return major -} - -func (version Version) minor() (uint, error) { - minorPart := strings.Split(string(version), ".")[1] - minor, err := strconv.ParseUint(minorPart, 10, 0) - return uint(minor), err -} - -// Minor returns the minor version portion of a Version -func (version Version) Minor() uint { - minor, _ := version.minor() - return minor -} - // UnmarshalYAML implements the yaml.Unmarshaler interface // Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { @@ -223,109 +185,30 @@ func Parse(rd io.Reader) (*Configuration, error) { return nil, err } - var untypedConfig struct { - Version Version - } - var config *Configuration + p := NewParser("registry", []VersionedParseInfo{ + { + Version: MajorMinorVersion(0, 1), + ParseAs: reflect.TypeOf(v0_1Configuration{}), + ConversionFunc: func(c interface{}) (interface{}, error) { + if v0_1, ok := c.(*v0_1Configuration); ok { + if v0_1.Loglevel == Loglevel("") { + v0_1.Loglevel = Loglevel("info") + } + if v0_1.Storage.Type() == "" { + return nil, fmt.Errorf("No storage configuration provided") + } + return (*Configuration)(v0_1), nil + } + return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + }, + }, + }) - if err := yaml.Unmarshal(in, &untypedConfig); err != nil { + config := new(Configuration) + err = p.Parse(in, config) + if err != nil { return nil, err } - if untypedConfig.Version == "" { - return nil, fmt.Errorf("Please specify a configuration version. 
Current version is %s", CurrentVersion) - } - - // Parse the remainder of the configuration depending on the provided version - switch untypedConfig.Version { - case "0.1": - config, err = parseV0_1Registry(in) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("Unsupported configuration version %s Current version is %s", untypedConfig.Version, CurrentVersion) - } - return config, nil } - -// parseV0_1Registry parses a registry Configuration for Version 0.1 -func parseV0_1Registry(in []byte) (*Configuration, error) { - envMap := getEnvMap() - - var config v0_1Configuration - err := yaml.Unmarshal(in, &config) - if err != nil { - return nil, err - } - - // Override config.Loglevel if environment variable is provided - if loglevel, ok := envMap["REGISTRY_LOGLEVEL"]; ok { - var newLoglevel Loglevel - err := yaml.Unmarshal([]byte(loglevel), &newLoglevel) - if err != nil { - return nil, err - } - config.Loglevel = newLoglevel - } - - // Override config.Storage if environment variable is provided - if storageType, ok := envMap["REGISTRY_STORAGE"]; ok { - if storageType != config.Storage.Type() { - // Reset the storage parameters because we're using a different storage type - config.Storage = Storage{storageType: Parameters{}} - } - } - - if config.Storage.Type() == "" { - return nil, fmt.Errorf("Must provide exactly one storage type, optionally with parameters. Provided: %v", config.Storage) - } - - // Override storage parameters with all environment variables of the format: - // REGISTRY_STORAGE__ - storageParamsRegexp, err := regexp.Compile(fmt.Sprintf("^REGISTRY_STORAGE_%s_([A-Z0-9]+)$", strings.ToUpper(config.Storage.Type()))) - if err != nil { - return nil, err - } - for k, v := range envMap { - if submatches := storageParamsRegexp.FindStringSubmatch(k); submatches != nil { - config.Storage.setParameter(strings.ToLower(submatches[1]), v) - } - } - - if bugsnagAPIKey, ok := envMap["REGISTRY_REPORTING_BUGSNAG_APIKEY"]; ok { - config.Reporting.Bugsnag.APIKey = bugsnagAPIKey - } - if bugsnagReleaseStage, ok := envMap["REGISTRY_REPORTING_BUGSNAG_RELEASESTAGE"]; ok { - config.Reporting.Bugsnag.ReleaseStage = bugsnagReleaseStage - } - if bugsnagEndpoint, ok := envMap["REGISTRY_REPORTING_BUGSNAG_ENDPOINT"]; ok { - config.Reporting.Bugsnag.Endpoint = bugsnagEndpoint - } - - if newRelicLicenseKey, ok := envMap["REGISTRY_REPORTING_NEWRELIC_LICENSEKEY"]; ok { - config.Reporting.NewRelic.LicenseKey = newRelicLicenseKey - } - if newRelicName, ok := envMap["REGISTRY_REPORTING_NEWRELIC_NAME"]; ok { - config.Reporting.NewRelic.Name = newRelicName - } - - if httpAddr, ok := envMap["REGISTRY_HTTP_ADDR"]; ok { - config.HTTP.Addr = httpAddr - } - - return (*Configuration)(&config), nil -} - -// getEnvMap reads the current environment variables and converts these into a key/value map -// This is used to distinguish between empty strings returned by os.GetEnv(key) because of undefined -// environment variables and explicitly empty ones -func getEnvMap() map[string]string { - envMap := make(map[string]string) - for _, env := range os.Environ() { - envParts := strings.SplitN(env, "=", 2) - envMap[envParts[0]] = envParts[1] - } - return envMap -} diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 5c9ec9e7..75bec818 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -56,8 +56,8 @@ reporting: apikey: BugsnagApiKey ` -// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory storage driver 
with -// no parameters +// inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory +// storage driver with no parameters var inmemoryConfigYamlV0_1 = ` version: 0.1 loglevel: info @@ -75,8 +75,8 @@ func (suite *ConfigSuite) SetUpTest(c *C) { suite.expectedConfig = copyConfig(configStruct) } -// TestMarshalRoundtrip validates that configStruct can be marshaled and unmarshaled without -// changing any parameters +// TestMarshalRoundtrip validates that configStruct can be marshaled and +// unmarshaled without changing any parameters func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) @@ -85,15 +85,16 @@ func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } -// TestParseSimple validates that configYamlV0_1 can be parsed into a struct matching configStruct +// TestParseSimple validates that configYamlV0_1 can be parsed into a struct +// matching configStruct func (suite *ConfigSuite) TestParseSimple(c *C) { config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } -// TestParseInmemory validates that configuration yaml with storage provided as a string can be -// parsed into a Configuration struct with no storage parameters +// TestParseInmemory validates that configuration yaml with storage provided as +// a string can be parsed into a Configuration struct with no storage parameters func (suite *ConfigSuite) TestParseInmemory(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} suite.expectedConfig.Reporting = Reporting{} @@ -103,9 +104,31 @@ func (suite *ConfigSuite) TestParseInmemory(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } -// TestParseWithSameEnvStorage validates that providing environment variables that match the given -// storage type and parameters will not alter the parsed Configuration struct +// TestParseIncomplete validates that an incomplete yaml configuration cannot +// be parsed without providing environment variables to fill in the missing +// components. 
+func (suite *ConfigSuite) TestParseIncomplete(c *C) { + incompleteConfigYaml := "version: 0.1" + _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, NotNil) + + suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} + suite.expectedConfig.Reporting = Reporting{} + + os.Setenv("REGISTRY_STORAGE", "filesystem") + os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") + + config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, suite.expectedConfig) +} + +// TestParseWithSameEnvStorage validates that providing environment variables +// that match the given storage type will only include environment-defined +// parameters and remove yaml-defined parameters func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { + suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} + os.Setenv("REGISTRY_STORAGE", "s3") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") @@ -180,6 +203,22 @@ func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { c.Assert(config, DeepEquals, suite.expectedConfig) } +// TestParseInvalidLoglevel validates that the parser will fail to parse a +// configuration if the loglevel is malformed +func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { + invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" + _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) + c.Assert(err, NotNil) + + os.Setenv("REGISTRY_LOGLEVEL", "derp") + + _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) + c.Assert(err, NotNil) + +} + +// TestParseWithDifferentEnvReporting validates that environment variables +// properly override reporting parameters func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" diff --git a/configuration/parser.go b/configuration/parser.go new file mode 100644 index 00000000..ca5f9afd --- /dev/null +++ b/configuration/parser.go @@ -0,0 +1,203 @@ +package configuration + +import ( + "fmt" + "os" + "reflect" + "regexp" + "strconv" + "strings" + + "gopkg.in/BrianBland/yaml.v2" +) + +// Version is a major/minor version pair of the form Major.Minor +// Major version upgrades indicate structure or type changes +// Minor version upgrades should be strictly additive +type Version string + +// MajorMinorVersion constructs a Version from its Major and Minor components +func MajorMinorVersion(major, minor uint) Version { + return Version(fmt.Sprintf("%d.%d", major, minor)) +} + +func (version Version) major() (uint, error) { + majorPart := strings.Split(string(version), ".")[0] + major, err := strconv.ParseUint(majorPart, 10, 0) + return uint(major), err +} + +// Major returns the major version portion of a Version +func (version Version) Major() uint { + major, _ := version.major() + return major +} + +func (version Version) minor() (uint, error) { + minorPart := strings.Split(string(version), ".")[1] + minor, err := strconv.ParseUint(minorPart, 10, 0) + return uint(minor), err +} + +// Minor returns the minor version portion of a Version +func (version Version) Minor() uint { + minor, _ := version.minor() + return minor +} + +// VersionedParseInfo defines how a specific version of a configuration should +// be parsed into the current version +type VersionedParseInfo struct { + // Version is the version which this 
parsing information relates to + Version Version + // ParseAs defines the type which a configuration file of this version + // should be parsed into + ParseAs reflect.Type + // ConversionFunc defines a method for converting the parsed configuration + // (of type ParseAs) into the current configuration version + // Note: this method signature is very unclear with the absence of generics + ConversionFunc func(interface{}) (interface{}, error) +} + +// Parser can be used to parse a configuration file and environment of a defined +// version into a unified output structure +type Parser struct { + prefix string + mapping map[Version]VersionedParseInfo + env map[string]string +} + +// NewParser returns a *Parser with the given environment prefix which handles +// versioned configurations which match the given parseInfos +func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { + p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo), env: make(map[string]string)} + + for _, parseInfo := range parseInfos { + p.mapping[parseInfo.Version] = parseInfo + } + + for _, env := range os.Environ() { + envParts := strings.SplitN(env, "=", 2) + p.env[envParts[0]] = envParts[1] + } + + return &p +} + +// Parse reads in the given []byte and environment and writes the resulting +// configuration into the input v +// +// Environment variables may be used to override configuration parameters other +// than version, following the scheme below: +// v.Abc may be replaced by the value of PREFIX_ABC, +// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth +func (p *Parser) Parse(in []byte, v interface{}) error { + var versionedStruct struct { + Version Version + } + + if err := yaml.Unmarshal(in, &versionedStruct); err != nil { + return err + } + + parseInfo, ok := p.mapping[versionedStruct.Version] + if !ok { + return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + } + + parseAs := reflect.New(parseInfo.ParseAs) + err := yaml.Unmarshal(in, parseAs.Interface()) + if err != nil { + return err + } + + err = p.overwriteFields(parseAs, p.prefix) + if err != nil { + return err + } + + c, err := parseInfo.ConversionFunc(parseAs.Interface()) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) + return nil +} + +func (p *Parser) overwriteFields(v reflect.Value, prefix string) error { + for v.Kind() == reflect.Ptr { + v = reflect.Indirect(v) + } + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + sf := v.Type().Field(i) + fieldPrefix := strings.ToUpper(prefix + "_" + sf.Name) + if e, ok := p.env[fieldPrefix]; ok { + fieldVal := reflect.New(sf.Type) + err := yaml.Unmarshal([]byte(e), fieldVal.Interface()) + if err != nil { + return err + } + v.Field(i).Set(reflect.Indirect(fieldVal)) + } + err := p.overwriteFields(v.Field(i), fieldPrefix) + if err != nil { + return err + } + } + case reflect.Map: + p.overwriteMap(v, prefix) + } + return nil +} + +func (p *Parser) overwriteMap(m reflect.Value, prefix string) error { + switch m.Type().Elem().Kind() { + case reflect.Struct: + for _, k := range m.MapKeys() { + err := p.overwriteFields(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) + if err != nil { + return err + } + } + envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) + if err != nil { + return err + } + for key, val := range p.env { + if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { + 
mapValue := reflect.New(m.Type().Elem()) + err := yaml.Unmarshal([]byte(val), mapValue.Interface()) + if err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) + } + } + case reflect.Map: + for _, k := range m.MapKeys() { + err := p.overwriteMap(m.MapIndex(k), strings.ToUpper(fmt.Sprintf("%s_%s", prefix, k))) + if err != nil { + return err + } + } + default: + envMapRegexp, err := regexp.Compile(fmt.Sprintf("^%s_([A-Z0-9]+)$", strings.ToUpper(prefix))) + if err != nil { + return err + } + + for key, val := range p.env { + if submatches := envMapRegexp.FindStringSubmatch(key); submatches != nil { + mapValue := reflect.New(m.Type().Elem()) + err := yaml.Unmarshal([]byte(val), mapValue.Interface()) + if err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(strings.ToLower(submatches[1])), reflect.Indirect(mapValue)) + } + } + } + return nil +} From 2e3af8efcf416debb69cdf85318bb666758255f2 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 17 Dec 2014 16:10:07 -0800 Subject: [PATCH 159/165] Refactor token verification to support x5c header Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- auth/token/token.go | 208 +++++++++++++++++++++++--------------------- 1 file changed, 110 insertions(+), 98 deletions(-) diff --git a/auth/token/token.go b/auth/token/token.go index 568b257a..8752afae 100644 --- a/auth/token/token.go +++ b/auth/token/token.go @@ -53,25 +53,11 @@ type ClaimSet struct { // Header describes the header section of a JSON Web Token. type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - RawJWK json.RawMessage `json:"jwk"` - SigningKey libtrust.PublicKey `json:"-"` -} - -// CheckSigningKey parses the `jwk` field of a JOSE header and sets the -// SigningKey field if it is valid. -func (h *Header) CheckSigningKey() (err error) { - if len(h.RawJWK) == 0 { - // No signing key was specified. - return - } - - h.SigningKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(h.RawJWK)) - h.RawJWK = nil // Don't need this anymore! - - return + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK json.RawMessage `json:"jwk,omitempty"` } // Token describes a JSON Web Token. @@ -135,10 +121,6 @@ func NewToken(rawToken string) (*Token, error) { return nil, ErrMalformedToken } - if err = token.Header.CheckSigningKey(); err != nil { - return nil, ErrMalformedToken - } - if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { return nil, ErrMalformedToken } @@ -174,108 +156,86 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error { return ErrInvalidToken } - // If the token header has a SigningKey field, verify the signature - // using that key and its included x509 certificate chain if necessary. - // If the Header's SigningKey field is nil, try using the KeyID field. - signingKey := t.Header.SigningKey - - if signingKey == nil { - // Find the key in the given collection of trusted keys. - trustedKey, ok := verifyOpts.TrustedKeys[t.Header.KeyID] - if !ok { - log.Errorf("token signed by untrusted key with ID: %q", t.Header.KeyID) - return ErrInvalidToken - } - signingKey = trustedKey + // Verify that the signing key is trusted. + signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Error(err) + return ErrInvalidToken } - // First verify the signature of the token using the key which signed it. 
+	// Finally, verify the signature of the token using the key which signed it.
 	if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil {
 		log.Errorf("unable to verify token signature: %s", err)
 		return ErrInvalidToken
 	}
 
-	// Next, check if the signing key is one of the trusted keys.
-	if _, isTrustedKey := verifyOpts.TrustedKeys[signingKey.KeyID()]; isTrustedKey {
-		// We're done! The token was signed by
-		// a trusted key and has been verified!
-		return nil
-	}
-
-	// Otherwise, we need to check the sigining keys included certificate chain.
-	return t.verifyCertificateChain(signingKey, verifyOpts.Roots)
+	return nil
 }
 
-// verifyCertificateChain attempts to verify the token using the "x5c" field
-// of the given leafKey which was used to sign it. Returns a nil error if
-// the key's certificate chain is valid and rooted an one of the given roots.
-func (t *Token) verifyCertificateChain(leafKey libtrust.PublicKey, roots *x509.CertPool) error {
-	// In this case, the token signature is valid, but the key that signed it
-	// is not in our set of trusted keys. So, we'll need to check if the
-	// token's signing key included an x509 certificate chain that can be
-	// verified up to one of our trusted roots.
-	x5cVal, ok := leafKey.GetExtendedField("x5c").([]interface{})
-	if !ok || x5cVal == nil {
-		log.Error("unable to verify token signature: signed by untrusted key with no valid certificate chain")
-		return ErrInvalidToken
+// VerifySigningKey attempts to get the key which was used to sign this token.
+// The token header should contain one of these 3 fields:
+//      `x5c` - The x509 certificate chain for the signing key. Needs to be
+//              verified.
+//      `jwk` - The JSON Web Key representation of the signing key.
+//              May contain its own `x5c` field which needs to be verified.
+//      `kid` - The unique identifier for the key. This library interprets it
+//              as a libtrust fingerprint. The key itself can be looked up in
+//              the trustedKeys field of the given verify options.
+// Each of these methods is tried in that order of preference until the
+// signing key is found or an error is returned.
+func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {
+	// First attempt to get an x509 certificate chain from the header.
+	var (
+		x5c    = t.Header.X5c
+		rawJWK = t.Header.RawJWK
+		keyID  = t.Header.KeyID
+	)
+
+	switch {
+	case len(x5c) > 0:
+		signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots)
+	case len(rawJWK) > 0:
+		signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts)
+	case len(keyID) > 0:
+		signingKey = verifyOpts.TrustedKeys[keyID]
+		if signingKey == nil {
+			err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID)
+		}
+	default:
+		err = errors.New("unable to get token signing key")
 	}
 
-	// Ensure each item is of the correct type.
-	x5c := make([]string, len(x5cVal))
-	for i, val := range x5cVal {
-		certString, ok := val.(string)
-		if !ok || len(certString) == 0 {
-			log.Error("unable to verify token signature: signed by untrusted key with malformed certificate chain")
-			return ErrInvalidToken
-		}
-		x5c[i] = certString
+	return
+}
+
+func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) {
+	if len(x5c) == 0 {
+		return nil, errors.New("empty x509 certificate chain")
 	}
 
 	// Ensure the first element is encoded correctly.
leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) if err != nil { - log.Errorf("unable to decode signing key leaf cert: %s", err) - return ErrInvalidToken + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) } // And that it is a valid x509 certificate. leafCert, err := x509.ParseCertificate(leafCertDer) if err != nil { - log.Errorf("unable to parse signing key leaf cert: %s", err) - return ErrInvalidToken + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) } - // Verify that the public key in the leaf cert *is* the signing key. - leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) - if !ok { - log.Error("unable to get signing key leaf cert public key value") - return ErrInvalidToken - } - - leafPubKey, err := libtrust.FromCryptoPublicKey(leafCryptoKey) - if err != nil { - log.Errorf("unable to make libtrust public key from signing key leaf cert: %s", err) - return ErrInvalidToken - } - - if leafPubKey.KeyID() != leafKey.KeyID() { - log.Error("token signing key ID and leaf certificate public key ID do not match") - return ErrInvalidToken - } - - // The rest of the x5c array are intermediate certificates. + // The rest of the certificate chain are intermediate certificates. intermediates := x509.NewCertPool() for i := 1; i < len(x5c); i++ { intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) if err != nil { - log.Errorf("unable to decode signing key intermediate cert: %s", err) - return ErrInvalidToken + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) } intermediateCert, err := x509.ParseCertificate(intermediateCertDer) if err != nil { - log.Errorf("unable to parse signing key intermediate cert: %s", err) - return ErrInvalidToken + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) } intermediates.AddCert(intermediateCert) @@ -290,12 +250,64 @@ func (t *Token) verifyCertificateChain(leafKey libtrust.PublicKey, roots *x509.C // TODO: this call returns certificate chains which we ignore for now, but // we should check them for revocations if we have the ability later. if _, err = leafCert.Verify(verifyOpts); err != nil { - log.Errorf("unable to verify signing key certificate: %s", err) - return ErrInvalidToken + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) } - // The signing key's x509 chain is valid! - return nil + // Get the public key from the leaf certificate. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. 
+ x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. + if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return } // accessSet returns a set of actions available for the resource From 4c42477abfc5bce6d584b7039e826f3c47b720f9 Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Wed, 17 Dec 2014 16:16:02 -0800 Subject: [PATCH 160/165] Refactored out usage of strconv.Quote() Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- auth/token/accesscontroller.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/auth/token/accesscontroller.go b/auth/token/accesscontroller.go index f7ca4d52..b9ee5771 100644 --- a/auth/token/accesscontroller.go +++ b/auth/token/accesscontroller.go @@ -9,7 +9,6 @@ import ( "io/ioutil" "net/http" "os" - "strconv" "strings" "github.com/docker/libtrust" @@ -97,16 +96,16 @@ func (ac *authChallenge) Status() int { // the WWW-Authenticate response challenge header. // See https://tools.ietf.org/html/rfc6750#section-3 func (ac *authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%s,service=%s", strconv.Quote(ac.realm), strconv.Quote(ac.service)) + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) if scope := ac.accessSet.scopeParam(); scope != "" { - str = fmt.Sprintf("%s,scope=%s", str, strconv.Quote(scope)) + str = fmt.Sprintf("%s,scope=%q", str, scope) } if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { - str = fmt.Sprintf("%s,error=%s", str, strconv.Quote("invalid_token")) + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") } else if ac.err == ErrInsufficientScope { - str = fmt.Sprintf("%s,error=%s", str, strconv.Quote("insufficient_scope")) + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") } return str From bc8ab9b3921c6e290adfc5197a31259bbfe48839 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 17 Dec 2014 16:56:36 -0800 Subject: [PATCH 161/165] Fixes filesystem storage driver List semantics for nonexistent directory Now returns a PathNotFoundError --- storagedriver/filesystem/driver.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go index 3c3c950f..0fb5e6fc 100644 --- a/storagedriver/filesystem/driver.go +++ b/storagedriver/filesystem/driver.go @@ -201,6 +201,9 @@ func (d *Driver) List(subPath string) ([]string, error) { dir, err := os.Open(fullPath) if err != nil { + if os.IsNotExist(err) { + return nil, storagedriver.PathNotFoundError{Path: subPath} + } return nil, err } From e600f7ccb753cae680327775fe4228d55348f7d2 Mon Sep 17 00:00:00 2001 From: Olivier Gambier Date: Wed, 17 Dec 2014 11:55:24 -0800 Subject: [PATCH 162/165] Erm --- circle.yml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/circle.yml b/circle.yml index 0d75d2ac..cbd545d4 100644 --- a/circle.yml +++ b/circle.yml @@ -1,3 +1,4 @@ +# Pony-up! 
machine: pre: # Install gvm @@ -17,7 +18,7 @@ machine: BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR - # Workaround Circle parsing madness bugs + # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" hosts: @@ -66,9 +67,6 @@ test: - gvm use stable && go version # - gvm use bleed && go version - # Preset the goverall report file - - echo "$CIRCLE_PAIN" > ~/goverage.report - # FMT - gvm use stable && test -z "$(gofmt -s -l . | tee /dev/stderr)": pwd: $BASE_STABLE @@ -92,12 +90,19 @@ test: # pwd: $BASE_BLEED # Test stable, and report - - gvm use stable; go list ./... | go test -test.short -coverprofile=coverage.out && cat coverage.out | sed -n '1!p' >> ~/goverage.report: + # Preset the goverall report file + - echo "$CIRCLE_PAIN" > ~/goverage.report + - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out: + pwd: $BASE_STABLE + + - gvm use stable; go list ./... | xargs -L 1 -I{} go test -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}: timeout: 600 pwd: $BASE_STABLE post: - # Report to coveralls + # Aggregate and report to coveralls + - gvm use stable; go list ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report: + pwd: $BASE_STABLE - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN: pwd: $BASE_STABLE From 030b0ff310e386aca4c60dd74c96d587dad098a7 Mon Sep 17 00:00:00 2001 From: Brian Bland Date: Wed, 17 Dec 2014 19:06:55 -0800 Subject: [PATCH 163/165] Allows storagedriver parameter values to be of type interface{} This enables use of nil, booleans, numeric types, and even complex structures for parameter values, assuming they can be parsed from yaml. --- configuration/configuration.go | 4 ++-- configuration/configuration_test.go | 12 ++++++------ storagedriver/factory/factory.go | 4 ++-- storagedriver/filesystem/driver.go | 6 +++--- storagedriver/inmemory/driver.go | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index bbb88a0e..8e68190d 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -103,7 +103,7 @@ func (storage Storage) Parameters() Parameters { } // setParameter changes the parameter at the provided key to the new value -func (storage Storage) setParameter(key, value string) { +func (storage Storage) setParameter(key string, value interface{}) { storage[storage.Type()][key] = value } @@ -143,7 +143,7 @@ func (storage Storage) MarshalYAML() (interface{}, error) { } // Parameters defines a key-value parameters mapping -type Parameters map[string]string +type Parameters map[string]interface{} // Reporting defines error reporting methods. 
type Reporting struct { diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 75bec818..e6fd6295 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -21,12 +21,12 @@ var configStruct = Configuration{ "region": "us-east-1", "bucket": "my-bucket", "rootpath": "/registry", - "encrypt": "true", - "secure": "false", + "encrypt": true, + "secure": false, "accesskey": "SAMPLEACCESSKEY", "secretkey": "SUPERSECRET", - "host": "", - "port": "", + "host": nil, + "port": 42, }, }, Reporting: Reporting{ @@ -50,7 +50,7 @@ storage: accesskey: SAMPLEACCESSKEY secretkey: SUPERSECRET host: ~ - port: ~ + port: 42 reporting: bugsnag: apikey: BugsnagApiKey @@ -142,7 +142,7 @@ func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { // Configuration struct func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { suite.expectedConfig.Storage.setParameter("region", "us-west-1") - suite.expectedConfig.Storage.setParameter("secure", "true") + suite.expectedConfig.Storage.setParameter("secure", true) suite.expectedConfig.Storage.setParameter("newparam", "some Value") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") diff --git a/storagedriver/factory/factory.go b/storagedriver/factory/factory.go index 0f8ca001..254cd9bb 100644 --- a/storagedriver/factory/factory.go +++ b/storagedriver/factory/factory.go @@ -16,7 +16,7 @@ type StorageDriverFactory interface { // Create returns a new storagedriver.StorageDriver with the given parameters // Parameters will vary by driver and may be ignored // Each parameter key must only consist of lowercase letters and numbers - Create(parameters map[string]string) (storagedriver.StorageDriver, error) + Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) } // Register makes a storage driver available by the provided name. 
@@ -37,7 +37,7 @@ func Register(name string, factory StorageDriverFactory) {
 // To run in-process, the StorageDriverFactory must first be registered with the given name
 // If no in-process drivers are found with the given name, this attempts to create an IPC driver
 // If no in-process or external drivers are found, an InvalidStorageDriverError is returned
-func Create(name string, parameters map[string]string) (storagedriver.StorageDriver, error) {
+func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
 	driverFactory, ok := driverFactories[name]
 	if !ok {
 		return nil, InvalidStorageDriverError{name}
diff --git a/storagedriver/filesystem/driver.go b/storagedriver/filesystem/driver.go
index 3c3c950f..635c13a2 100644
--- a/storagedriver/filesystem/driver.go
+++ b/storagedriver/filesystem/driver.go
@@ -23,7 +23,7 @@ func init() {
 // filesystemDriverFactory implements the factory.StorageDriverFactory interface
 type filesystemDriverFactory struct{}
 
-func (factory *filesystemDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
+func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
 	return FromParameters(parameters), nil
 }
 
@@ -36,12 +36,12 @@ type Driver struct {
 // FromParameters constructs a new Driver with a given parameters map
 // Optional Parameters:
 // - rootdirectory
-func FromParameters(parameters map[string]string) *Driver {
+func FromParameters(parameters map[string]interface{}) *Driver {
 	var rootDirectory = defaultRootDirectory
 	if parameters != nil {
 		rootDir, ok := parameters["rootdirectory"]
 		if ok {
-			rootDirectory = rootDir
+			rootDirectory = fmt.Sprint(rootDir)
 		}
 	}
 	return New(rootDirectory)
diff --git a/storagedriver/inmemory/driver.go b/storagedriver/inmemory/driver.go
index 7481c472..2e23c758 100644
--- a/storagedriver/inmemory/driver.go
+++ b/storagedriver/inmemory/driver.go
@@ -21,7 +21,7 @@ func init() {
 // inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
 type inMemoryDriverFactory struct{}
 
-func (factory *inMemoryDriverFactory) Create(parameters map[string]string) (storagedriver.StorageDriver, error) {
+func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
 	return New(), nil
 }

From d0a9e9b4755efdaafd8e34a228898322ac14cc5b Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 18 Dec 2014 12:30:19 -0800
Subject: [PATCH 164/165] Integrate auth.AccessController into registry app

This changeset integrates the AccessController into the main registry app.
This includes support for configuration and a test implementation, called
"silly" auth. Auth is only enabled if the configuration is present but takes
measures to ensure that configuration errors don't allow the appserver to
start with open access.
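Before the diff, a rough sketch of the flow this change wires up may help; the GetAccessController signature and the Access, Resource, and Challenge types are inferred from their usage in the app.go and auth/silly changes below, and the repository name and port are hypothetical:

package main

import (
	"net/http"

	"github.com/docker/docker-registry/auth"
	_ "github.com/docker/docker-registry/auth/silly" // registers the "silly" controller
)

func main() {
	// Resolve a controller by name, as NewApp does when an auth section is
	// present in the configuration.
	ac, err := auth.GetAccessController("silly", map[string]interface{}{
		"realm":   "realm-test",
		"service": "service-test",
	})
	if err != nil {
		// NewApp panics in the same situation: a broken auth configuration
		// must not start an open registry.
		panic(err)
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// A GET against a repository requires "pull" access.
		access := auth.Access{
			Resource: auth.Resource{Type: "repository", Name: "library/ubuntu"},
			Action:   "pull",
		}

		if err := ac.Authorized(r, access); err != nil {
			if challenge, ok := err.(auth.Challenge); ok {
				challenge.ServeHTTP(w, r) // answers 401 with a bearer challenge
				return
			}
			w.WriteHeader(http.StatusBadRequest) // opaque failure, as in app.authorized
			return
		}

		w.WriteHeader(http.StatusNoContent)
	})

	http.ListenAndServe(":5000", nil)
}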
--- api/v2/descriptors.go | 8 ++ api/v2/errors.go | 3 + app.go | 110 +++++++++++++++++++++++++--- app_test.go | 63 ++++++++++++++++ auth/silly/access.go | 89 ++++++++++++++++++++++ auth/silly/access_test.go | 58 +++++++++++++++ cmd/registry/main.go | 6 +- configuration/configuration.go | 71 +++++++++++++++++- configuration/configuration_test.go | 22 ++++++ 9 files changed, 413 insertions(+), 17 deletions(-) create mode 100644 auth/silly/access.go create mode 100644 auth/silly/access_test.go diff --git a/api/v2/descriptors.go b/api/v2/descriptors.go index 68d18241..97e41b74 100644 --- a/api/v2/descriptors.go +++ b/api/v2/descriptors.go @@ -39,6 +39,14 @@ var ErrorDescriptors = []ErrorDescriptor{ Description: `Generic error returned when the error does not have an API classification.`, }, + { + Code: ErrorCodeUnauthorized, + Value: "UNAUTHORIZED", + Message: "access to the requested resource is not authorized", + Description: `The access controller denied access for the operation on + a resource. Often this will be accompanied by a 401 Unauthorized + response status.`, + }, { Code: ErrorCodeDigestInvalid, Value: "DIGEST_INVALID", diff --git a/api/v2/errors.go b/api/v2/errors.go index 8c85d3a9..94c646fc 100644 --- a/api/v2/errors.go +++ b/api/v2/errors.go @@ -13,6 +13,9 @@ const ( // ErrorCodeUnknown is a catch-all for errors not defined below. ErrorCodeUnknown ErrorCode = iota + // ErrorCodeUnauthorized is returned if a request is not authorized. + ErrorCodeUnauthorized + // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. ErrorCodeDigestInvalid diff --git a/app.go b/app.go index 5a770c6c..7d1ee9bb 100644 --- a/app.go +++ b/app.go @@ -5,11 +5,11 @@ import ( "net/http" "github.com/docker/docker-registry/api/v2" - "github.com/docker/docker-registry/storagedriver" - "github.com/docker/docker-registry/storagedriver/factory" - + "github.com/docker/docker-registry/auth" "github.com/docker/docker-registry/configuration" "github.com/docker/docker-registry/storage" + "github.com/docker/docker-registry/storagedriver" + "github.com/docker/docker-registry/storagedriver/factory" log "github.com/Sirupsen/logrus" "github.com/gorilla/mux" @@ -28,6 +28,8 @@ type App struct { // services contains the main services instance for the application. services *storage.Services + + accessController auth.AccessController } // NewApp takes a configuration and returns a configured app, ready to serve @@ -61,6 +63,16 @@ func NewApp(configuration configuration.Configuration) *App { app.driver = driver app.services = storage.NewServices(app.driver) + authType := configuration.Auth.Type() + + if authType != "" { + accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) + if err != nil { + panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + } + app.accessController = accessController + } + return app } @@ -111,15 +123,11 @@ func (ssrw *singleStatusResponseWriter) WriteHeader(status int) { // handler, using the dispatch factory function. func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - context := &Context{ - App: app, - Name: vars["name"], - urlBuilder: v2.NewURLBuilderFromRequest(r), - } + context := app.context(r) - // Store vars for underlying handlers. 
-	context.vars = vars
+	if err := app.authorized(w, r, context); err != nil {
+		return
+	}
 
 	context.log = log.WithField("name", context.Name)
 	handler := dispatch(context, r)
@@ -140,6 +148,86 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 	})
 }
 
+// context constructs the context object for the application. This should only
+// be called once per request.
+func (app *App) context(r *http.Request) *Context {
+	vars := mux.Vars(r)
+	context := &Context{
+		App:        app,
+		Name:       vars["name"],
+		urlBuilder: v2.NewURLBuilderFromRequest(r),
+	}
+
+	// Store vars for underlying handlers.
+	context.vars = vars
+
+	return context
+}
+
+// authorized checks if the request can proceed with the requested access
+// level. If it cannot, the method will return an error.
+func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
+	if app.accessController == nil {
+		return nil // access controller is not enabled.
+	}
+
+	var accessRecords []auth.Access
+	resource := auth.Resource{
+		Type: "repository",
+		Name: context.Name,
+	}
+
+	switch r.Method {
+	case "GET", "HEAD":
+		accessRecords = append(accessRecords,
+			auth.Access{
+				Resource: resource,
+				Action:   "pull",
+			})
+	case "POST", "PUT", "PATCH":
+		accessRecords = append(accessRecords,
+			auth.Access{
+				Resource: resource,
+				Action:   "pull",
+			},
+			auth.Access{
+				Resource: resource,
+				Action:   "push",
+			})
+	case "DELETE":
+		// DELETE access requires full admin rights, which is represented
+		// as "*". This may not be ideal.
+		accessRecords = append(accessRecords,
+			auth.Access{
+				Resource: resource,
+				Action:   "*",
+			})
+	}
+
+	if err := app.accessController.Authorized(r, accessRecords...); err != nil {
+		switch err := err.(type) {
+		case auth.Challenge:
+			w.Header().Set("Content-Type", "application/json")
+			err.ServeHTTP(w, r)
+
+			var errs v2.Errors
+			errs.Push(v2.ErrorCodeUnauthorized, accessRecords)
+			serveJSON(w, errs)
+		default:
+			// This condition is a potential security problem either in
+			// the configuration or whatever is backing the access
+			// controller. Just return a bad request with no information
+			// to avoid exposure. The request should not proceed.
+			context.log.Errorf("error checking authorization: %v", err)
+			w.WriteHeader(http.StatusBadRequest)
+		}
+
+		return err
+	}
+
+	return nil
+}
+
 // apiBase implements a simple yes-man for doing overall checks against the
 // api. This can support auth roundtrips to support docker login.
 func apiBase(w http.ResponseWriter, r *http.Request) {
diff --git a/app_test.go b/app_test.go
index f256c968..3e9d191d 100644
--- a/app_test.go
+++ b/app_test.go
@@ -1,12 +1,14 @@
 package registry
 
 import (
+	"encoding/json"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"testing"
 
 	"github.com/docker/docker-registry/api/v2"
+	_ "github.com/docker/docker-registry/auth/silly"
 	"github.com/docker/docker-registry/configuration"
 )
 
@@ -124,3 +126,64 @@ func TestAppDispatcher(t *testing.T) {
 		}
 	}
 }
+
+// TestNewApp covers the creation of an application via NewApp with a
+// configuration.
+func TestNewApp(t *testing.T) {
+	config := configuration.Configuration{
+		Storage: configuration.Storage{
+			"inmemory": nil,
+		},
+		Auth: configuration.Auth{
+			// For now, we simply test that new auth results in a viable
+			// application.
+			"silly": {
+				"realm":   "realm-test",
+				"service": "service-test",
+			},
+		},
+	}
+
+	// Mostly, with this test, given a sane configuration, we are simply
+	// ensuring that NewApp doesn't panic. We might want to tweak this
+	// behavior.
+	app := NewApp(config)
+
+	server := httptest.NewServer(app)
+	builder, err := v2.NewURLBuilderFromString(server.URL)
+	if err != nil {
+		t.Fatalf("error creating urlbuilder: %v", err)
+	}
+
+	baseURL, err := builder.BuildBaseURL()
+	if err != nil {
+		t.Fatalf("error creating baseURL: %v", err)
+	}
+
+	// TODO(stevvooe): The rest of this test might belong in the API tests.
+
+	// Just hit the app and make sure we get a 401 Unauthorized error.
+	req, err := http.Get(baseURL)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer req.Body.Close()
+
+	if req.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected status code during request: %v", err)
+	}
+
+	if req.Header.Get("Content-Type") != "application/json" {
+		t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json")
+	}
+
+	var errs v2.Errors
+	dec := json.NewDecoder(req.Body)
+	if err := dec.Decode(&errs); err != nil {
+		t.Fatalf("error decoding error response: %v", err)
+	}
+
+	if errs.Errors[0].Code != v2.ErrorCodeUnauthorized {
+		t.Fatalf("unexpected error code: %v != %v", errs.Errors[0].Code, v2.ErrorCodeUnauthorized)
+	}
+}
diff --git a/auth/silly/access.go b/auth/silly/access.go
new file mode 100644
index 00000000..a747fb6d
--- /dev/null
+++ b/auth/silly/access.go
@@ -0,0 +1,89 @@
+// Package silly provides a simple authentication scheme that checks for the
+// existence of an Authorization header and issues access if it is present and
+// non-empty.
+//
+// This package is present as an example implementation of a minimal
+// auth.AccessController and for testing. This is not suitable for any kind of
+// production security.
+package silly
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/docker/docker-registry/auth"
+)
+
+// accessController provides a simple implementation of auth.AccessController
+// that simply checks for a non-empty Authorization header. It is useful for
+// demonstration and testing.
+type accessController struct {
+	realm   string
+	service string
+}
+
+var _ auth.AccessController = &accessController{}
+
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
+	realm, present := options["realm"]
+	if _, ok := realm.(string); !present || !ok {
+		return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
+	}
+
+	service, present := options["service"]
+	if _, ok := service.(string); !present || !ok {
+		return nil, fmt.Errorf(`"service" must be set for silly access controller`)
+	}
+
+	return &accessController{realm: realm.(string), service: service.(string)}, nil
+}
+
+// Authorized simply checks for the existence of the authorization header,
+// responding with a bearer challenge if it doesn't exist.
+func (ac *accessController) Authorized(req *http.Request, accessRecords ...auth.Access) error {
+	if req.Header.Get("Authorization") == "" {
+		challenge := challenge{
+			realm:   ac.realm,
+			service: ac.service,
+		}
+
+		if len(accessRecords) > 0 {
+			var scopes []string
+			for _, access := range accessRecords {
+				scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource, access.Action))
+			}
+			challenge.scope = strings.Join(scopes, " ")
+		}
+
+		return &challenge
+	}
+
+	return nil
+}
+
+type challenge struct {
+	realm   string
+	service string
+	scope   string
+}
+
+func (ch *challenge) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)
+
+	if ch.scope != "" {
+		header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
+	}
+
+	w.Header().Set("Authorization", header)
+	w.WriteHeader(http.StatusUnauthorized)
+}
+
+func (ch *challenge) Error() string {
+	return fmt.Sprintf("silly authentication challenge: %#v", ch)
+}
+
+// init registers the silly auth backend.
+func init() {
+	auth.Register("silly", auth.InitFunc(newAccessController))
+}
diff --git a/auth/silly/access_test.go b/auth/silly/access_test.go
new file mode 100644
index 00000000..a412c101
--- /dev/null
+++ b/auth/silly/access_test.go
@@ -0,0 +1,58 @@
+package silly
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/docker/docker-registry/auth"
+)
+
+func TestSillyAccessController(t *testing.T) {
+	ac := &accessController{
+		realm:   "test-realm",
+		service: "test-service",
+	}
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if err := ac.Authorized(r); err != nil {
+			switch err := err.(type) {
+			case auth.Challenge:
+				err.ServeHTTP(w, r)
+				return
+			default:
+				t.Fatalf("unexpected error authorizing request: %v", err)
+			}
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+	}))
+
+	resp, err := http.Get(server.URL)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should not be authorized
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized)
+	}
+
+	req, err := http.NewRequest("GET", server.URL, nil)
+	if err != nil {
+		t.Fatalf("unexpected error creating new request: %v", err)
+	}
+	req.Header.Set("Authorization", "seriously, anything")
+
+	resp, err = http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("unexpected error during GET: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Request should now be authorized
+	if resp.StatusCode != http.StatusNoContent {
+		t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent)
+	}
+}
diff --git a/cmd/registry/main.go b/cmd/registry/main.go
index ba859eec..ea20a916 100644
--- a/cmd/registry/main.go
+++ b/cmd/registry/main.go
@@ -7,14 +7,14 @@ import (
 	_ "net/http/pprof"
 	"os"
 
-	"github.com/gorilla/handlers"
-
 	log "github.com/Sirupsen/logrus"
-
 	"github.com/bugsnag/bugsnag-go"
+	"github.com/gorilla/handlers"
 	"github.com/yvasiyarov/gorelic"
 
 	"github.com/docker/docker-registry"
+	_ "github.com/docker/docker-registry/auth/silly"
+	_ "github.com/docker/docker-registry/auth/token"
 	"github.com/docker/docker-registry/configuration"
 	_ "github.com/docker/docker-registry/storagedriver/filesystem"
 	_ "github.com/docker/docker-registry/storagedriver/inmemory"
diff --git a/configuration/configuration.go b/configuration/configuration.go
index 8e68190d..6ac64147 100644
---
a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -20,6 +20,10 @@ type Configuration struct {
 	// Storage is the configuration for the registry's storage driver
 	Storage Storage `yaml:"storage"`
 
+	// Auth allows configuration of various authorization methods that may be
+	// used to gate requests.
+	Auth Auth `yaml:"auth"`
+
 	// Reporting is the configuration for error reporting
 	Reporting Reporting `yaml:"reporting"`
 
@@ -85,6 +89,9 @@ func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error
 	return nil
 }
 
+// Parameters defines a key-value parameters mapping
+type Parameters map[string]interface{}
+
 // Storage defines the configuration for registry object storage
 type Storage map[string]Parameters
 
@@ -137,13 +144,71 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // MarshalYAML implements the yaml.Marshaler interface
 func (storage Storage) MarshalYAML() (interface{}, error) {
 	if storage.Parameters() == nil {
-		return storage.Type, nil
+		return storage.Type(), nil
 	}
 	return map[string]Parameters(storage), nil
 }
 
-// Parameters defines a key-value parameters mapping
-type Parameters map[string]interface{}
+// Auth defines the configuration for registry authorization.
+type Auth map[string]Parameters
+
+// Type returns the auth type, such as silly or token
+func (auth Auth) Type() string {
+	// Return the only key in this map
+	for k := range auth {
+		return k
+	}
+	return ""
+}
+
+// Parameters returns the Parameters map for an Auth configuration
+func (auth Auth) Parameters() Parameters {
+	return auth[auth.Type()]
+}
+
+// setParameter changes the parameter at the provided key to the new value
+func (auth Auth) setParameter(key string, value interface{}) {
+	auth[auth.Type()][key] = value
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface
+// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
+func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]Parameters
+	err := unmarshal(&m)
+	if err == nil {
+		if len(m) > 1 {
+			types := make([]string, 0, len(m))
+			for k := range m {
+				types = append(types, k)
+			}
+
+			// TODO(stevvooe): May want to change this slightly for
+			// authorization to allow multiple challenges.
+			return fmt.Errorf("must provide exactly one type. Provided: %v", types)
+
+		}
+		*auth = m
+		return nil
+	}
+
+	var authType string
+	err = unmarshal(&authType)
+	if err == nil {
+		*auth = Auth{authType: Parameters{}}
+		return nil
+	}
+
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface
+func (auth Auth) MarshalYAML() (interface{}, error) {
+	if auth.Parameters() == nil {
+		return auth.Type(), nil
+	}
+	return map[string]Parameters(auth), nil
+}
 
 // Reporting defines error reporting methods.
type Reporting struct {
diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go
index e6fd6295..91169e03 100644
--- a/configuration/configuration_test.go
+++ b/configuration/configuration_test.go
@@ -29,6 +29,12 @@ var configStruct = Configuration{
 			"port": 42,
 		},
 	},
+	Auth: Auth{
+		"silly": Parameters{
+			"realm":   "silly",
+			"service": "silly",
+		},
+	},
 	Reporting: Reporting{
 		Bugsnag: BugsnagReporting{
 			APIKey: "BugsnagApiKey",
@@ -51,6 +57,10 @@ storage:
     secretkey: SUPERSECRET
     host: ~
     port: 42
+auth:
+  silly:
+    realm: silly
+    service: silly
 reporting:
   bugsnag:
     apikey: BugsnagApiKey
@@ -62,6 +72,10 @@ var inmemoryConfigYamlV0_1 = `
 version: 0.1
 loglevel: info
 storage: inmemory
+auth:
+  silly:
+    realm: silly
+    service: silly
 `
 
 type ConfigSuite struct {
@@ -113,10 +127,13 @@ func (suite *ConfigSuite) TestParseIncomplete(c *C) {
 	c.Assert(err, NotNil)
 
 	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
+	suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}}
 	suite.expectedConfig.Reporting = Reporting{}
 
 	os.Setenv("REGISTRY_STORAGE", "filesystem")
 	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
+	os.Setenv("REGISTRY_AUTH", "silly")
+	os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")
 
 	config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
 	c.Assert(err, IsNil)
@@ -259,5 +276,10 @@ func copyConfig(config Configuration) *Configuration {
 		NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name},
 	}
 
+	configCopy.Auth = Auth{config.Auth.Type(): Parameters{}}
+	for k, v := range config.Auth.Parameters() {
+		configCopy.Auth.setParameter(k, v)
+	}
+
 	return configCopy
 }

From b1f36c3fe52c46f68bd55a6c2db88b52ec491da6 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 18 Dec 2014 17:20:35 -0800
Subject: [PATCH 165/165] Ensure that unset Context.Name is only allowed on base route

If Context.Name is not set, the access controller may allow an unintended
request through. By only allowing a request to proceed without a name on the
base route, we provide some protection if future bugs forget to set the
context properly.
---
 app.go               | 78 ++++++++++++++++++++++++++++----------------
 app_test.go          |  5 +++
 auth/silly/access.go |  2 +-
 3 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/app.go b/app.go
index 7d1ee9bb..5f1fe52d 100644
--- a/app.go
+++ b/app.go
@@ -172,36 +172,56 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont
 	}
 
 	var accessRecords []auth.Access
-	resource := auth.Resource{
-		Type: "repository",
-		Name: context.Name,
-	}
 
-	switch r.Method {
-	case "GET", "HEAD":
-		accessRecords = append(accessRecords,
-			auth.Access{
-				Resource: resource,
-				Action:   "pull",
-			})
-	case "POST", "PUT", "PATCH":
-		accessRecords = append(accessRecords,
-			auth.Access{
-				Resource: resource,
-				Action:   "pull",
-			},
-			auth.Access{
-				Resource: resource,
-				Action:   "push",
-			})
-	case "DELETE":
-		// DELETE access requires full admin rights, which is represented
-		// as "*". This may not be ideal.
- accessRecords = append(accessRecords, - auth.Access{ - Resource: resource, - Action: "*", - }) + if context.Name != "" { + resource := auth.Resource{ + Type: "repository", + Name: context.Name, + } + + switch r.Method { + case "GET", "HEAD": + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "pull", + }) + case "POST", "PUT", "PATCH": + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "pull", + }, + auth.Access{ + Resource: resource, + Action: "push", + }) + case "DELETE": + // DELETE access requires full admin rights, which is represented + // as "*". This may not be ideal. + accessRecords = append(accessRecords, + auth.Access{ + Resource: resource, + Action: "*", + }) + } + } else { + // Only allow the name not to be set on the base route. + route := mux.CurrentRoute(r) + + if route == nil || route.GetName() != v2.RouteNameBase { + // For this to be properly secured, context.Name must always be set + // for a resource that may make a modification. The only condition + // under which name is not set and we still allow access is when the + // base route is accessed. This section prevents us from making that + // mistake elsewhere in the code, allowing any operation to proceed. + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusForbidden) + + var errs v2.Errors + errs.Push(v2.ErrorCodeUnauthorized) + serveJSON(w, errs) + } } if err := app.accessController.Authorized(r, accessRecords...); err != nil { diff --git a/app_test.go b/app_test.go index 3e9d191d..b8edbd64 100644 --- a/app_test.go +++ b/app_test.go @@ -177,6 +177,11 @@ func TestNewApp(t *testing.T) { t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json") } + expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" + if req.Header.Get("Authorization") != expectedAuthHeader { + t.Fatalf("unexpected authorization header: %q != %q", req.Header.Get("Authorization"), expectedAuthHeader) + } + var errs v2.Errors dec := json.NewDecoder(req.Body) if err := dec.Decode(&errs); err != nil { diff --git a/auth/silly/access.go b/auth/silly/access.go index a747fb6d..4995d0dc 100644 --- a/auth/silly/access.go +++ b/auth/silly/access.go @@ -51,7 +51,7 @@ func (ac *accessController) Authorized(req *http.Request, accessRecords ...auth. if len(accessRecords) > 0 { var scopes []string for _, access := range accessRecords { - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource, access.Action)) + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) } challenge.scope = strings.Join(scopes, " ") }
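To close, a small sketch of what the corrected scope formatting above produces; it assumes Access embeds Resource, as the expressions access.Type and access.Resource.Name in this hunk suggest, and the repository name is hypothetical:

package main

import (
	"fmt"

	"github.com/docker/docker-registry/auth"
)

func main() {
	access := auth.Access{
		Resource: auth.Resource{Type: "repository", Name: "library/ubuntu"},
		Action:   "pull",
	}

	// Before the fix, the %s verb serialized the whole embedded Resource
	// struct; using access.Resource.Name renders the scope as intended:
	// repository:library/ubuntu:pull
	fmt.Printf("%s:%s:%s\n", access.Type, access.Resource.Name, access.Action)
}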